diff --git a/.gitattributes b/.gitattributes index 9daa027f5aa7112f05af1eea612af682889d2a42..a382b63b6dde81cad63e0ec633169ae83b946f43 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1236,3 +1236,11 @@ data/2025/2504_08xxx/2504.08183/dd9ad541-c188-40b6-8fd4-ceae96bfad95_origin.pdf data/2025/2504_08xxx/2504.08192/ef5ba100-581f-437d-8edc-97d27f723fa4_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_08xxx/2504.08204/2b821f7b-0121-4811-b017-bc29978129ed_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_08xxx/2504.08358/c8c54375-95ac-4560-8575-6646aab725d1_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_content_list.json b/data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..8feddbdb676af6ee59b5cb9235e6a1363efc5c46 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_content_list.json @@ -0,0 +1,3479 @@ +[ + 
{ + "type": "text", + "text": "Scaling Laws for Native Multimodal Models", + "text_level": 1, + "bbox": [ + 274, + 130, + 722, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mustafa Shukor²", + "bbox": [ + 173, + 181, + 290, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Enrico Fini", + "bbox": [ + 323, + 181, + 408, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Victor Guilherme Turrisi da Costa1", + "bbox": [ + 444, + 181, + 679, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Matthieu Cord²", + "bbox": [ + 715, + 181, + 821, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Joshua Susskind", + "bbox": [ + 346, + 205, + 467, + 222 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Alaaeldin El-Nouby", + "bbox": [ + 503, + 205, + 648, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Apple", + "bbox": [ + 367, + 229, + 426, + 248 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Sorbonne University", + "bbox": [ + 462, + 229, + 627, + 248 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 282, + 326, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Building general-purpose models that can effectively perceive the world through multimodal signals has been a long-standing goal. Current approaches involve integrating separately pre-trained components, such as connecting vision encoders to LLMs and continuing multimodal training. While such approaches exhibit remarkable sample efficiency, it remains an open question whether such late-fusion architectures are inherently superior. In this work, we revisit the architectural design of native multimodal models (NMMs)-those trained from the ground up on all modalities—and conduct an extensive scaling laws study, spanning 457 trained models with different architectures and training mixtures. 
Our investigation reveals no inherent advantage to late-fusion architectures over early-fusion ones, which do not rely on image encoders or tokenizers. On the contrary, early-fusion exhibits stronger performance at lower parameter counts, is more efficient to train, and is easier to deploy. Motivated by the strong performance of the early-fusion architectures, we show that incorporating Mixture of Experts (MoEs) allows models to learn modality-specific weights, significantly benefiting performance.", + "bbox": [ + 89, + 315, + 483, + 633 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 662, + 220, + 678 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multimodality provides a rich signal for perceiving and understanding the world. Advances in vision [23, 52, 55, 80] and language models [3, 19, 67] have enabled the development of powerful multimodal models that understand language, images, and audio. A common approach involves grafting separately pre-trained unimodal models, such as connecting a vision encoder to the input layer of an LLM [6, 9, 35, 43, 62, 64, 73, 78].", + "bbox": [ + 89, + 686, + 482, + 809 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although this seems like a convenient approach, it remains an open question whether such late-fusion strategies are inherently optimal for understanding multimodal signals. 
Moreover, with abundant multimodal data available, initializing from unimodal pre-training is potentially detrimental, as it may introduce biases that prevent the model", + "bbox": [ + 89, + 810, + 483, + 901 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4e86f0374868e29a9f27723f7ab7e8a000db7028e3af86023ac4513fa225732c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 284, + 874, + 391 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/aa4d2e50304f1b35bc419434fa34759b233eea950df0d8cf73344bc441f1cd30.jpg", + "image_caption": [ + "FLOPs", + "FLOPs", + "Figure 1. Scaling properties of Native Multimodal Models. Based on the scaling laws study in § 3.1, we observe: (1) early and late fusion models provide similar validation loss $L$ when trained with the same compute budget $C$ (FLOPs); (2) This performance is achieved via a different trade-off between parameters $N$ and number of training tokens $D$ , where early-fusion models require fewer parameters. (3) Sparse early-fusion models achieve lower loss and require more training tokens for a given FLOP budget." + ], + "image_footnote": [], + "bbox": [ + 517, + 407, + 874, + 517 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "from fully leveraging cross-modality co-dependancies. An additional challenge is scaling such systems; each component (e.g., vision encoder, LLM) has its own set of hyperparameters, pre-training data mixtures, and scaling properties with respect to the amount of data and compute applied. A more flexible architecture might allow the model to dynamically allocate its capacity across modalities, simplifying scaling efforts.", + "bbox": [ + 511, + 652, + 906, + 773 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we focus on the scaling properties of native multimodal models trained from the ground up on multimodal data. 
We first investigate whether the commonly adopted late-fusion architectures hold an intrinsic advantage by comparing them to early-fusion models, which process raw multimodal inputs without relying on dedicated vision encoders. We conduct scaling experiments on early and late fusion architectures, deriving scaling laws to pre", + "bbox": [ + 511, + 779, + 908, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07951v4 [cs.CV] 9 Aug 2025", + "bbox": [ + 22, + 279, + 58, + 717 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "dict their performance and compute-optimal configurations. Our findings indicate that late fusion offers no inherent advantage when trained from scratch. Instead, early-fusion models are more efficient and are easier to scale. Furthermore, we observe that native multimodal models follow scaling laws similar to those of LLMs [26], albeit with slight variations in scaling coefficients across modalities and datasets. Our results suggest that model parameters and training tokens should be scaled roughly equally for optimal performance. Moreover, we find that different multimodal training mixtures exhibit similar overall trends, indicating that our findings are likely to generalize to a broader range of settings.", + "bbox": [ + 89, + 90, + 480, + 286 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While our findings favor early fusion, multimodal data is inherently heterogeneous, suggesting that some degree of parameter specialization may still offer benefits. To investigate this, we explore leveraging Mixture of Experts (MoEs) [59], a technique that enables the model to dynamically allocate specialized parameters across modalities in a symmetric and parallel manner, in contrast to late-fusion models, which are asymmetric and process data sequentially. 
Training native multimodal models with MoEs results in significantly improved performance and therefore, faster convergence. Our scaling laws for MoEs suggest that scaling number of training tokens is more important than the number of active parameters. This unbalanced scaling is different from what is observed for dense models, due to the higher number of total parameters for sparse models. In addition, Our analysis reveals that experts tend to specialize in different modalities, with this specialization being particularly prominent in the early and last layers.", + "bbox": [ + 89, + 287, + 482, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1.1. Summary of our findings", + "text_level": 1, + "bbox": [ + 89, + 571, + 318, + 588 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our findings can be summarized as follows:", + "bbox": [ + 89, + 595, + 382, + 609 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Native Early and Late fusion perform on par: Early fusion models trained from scratch perform on par with their late-fusion counterparts, with a slight advantage to early-fusion models for low compute budgets (Figure 3). 
Furthermore, our scaling laws study indicates that the compute-optimal models for early and late fusion perform similarly as the compute budget increases (Figure 1 Top).", + "bbox": [ + 89, + 611, + 482, + 717 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "NMMs scale similarly to LLMs: The scaling laws of native multimodal models follow similar laws as text-only LLMs with slightly varying scaling exponents depending on the target data type and training mixture (Table 2).", + "bbox": [ + 89, + 717, + 482, + 777 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Late-fusion requires more parameters: Compute-optimal late-fusion models require a higher parameters-to-data ratio when compared to early-fusion (Figure 1 bottom).", + "bbox": [ + 89, + 779, + 482, + 824 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Sparsity significantly benefits early-fusion NMMs: Sparse NMMs exhibit significant improvements compared to their dense counterparts at the same inference cost (Figure 10). Furthermore, they implicitly learn modality-specific weights when trained with sparsity (Figure 12). In", + "bbox": [ + 89, + 825, + 482, + 901 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/d5047d4224dc4663381ccf110ebe91f262de136e0cc31df5c5930e78cd22c3cf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ExpressionDefinition
NNumber of parameters in the multimodal decoder. For MoEs this refers to the active parameters only.
DTotal number of multimodal tokens.
NvNumber of parameters in the vision-specific encoder. Only exists in late-fusion architectures.
DvNumber of vision-only tokens.
CTotal number of FLOPs, estimated as C = 6ND for early-fusion and C = 6(NvDv + ND) for late-fusion.
LValidation loss measured as the average over interleaved image-text, image-caption, and text-only data mixtures.
", + "bbox": [ + 519, + 89, + 903, + 212 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1. Definitions of the expressions used throughout the paper.", + "bbox": [ + 511, + 214, + 903, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "addition, compute-optimal models rely more on scaling the number of training tokens than the number of active parameters as the compute-budget grows (Figure 1 Bottom).", + "bbox": [ + 511, + 233, + 903, + 277 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Modality-agnostic routing beats Modality-aware routing for Sparse NMMs: Training sparse mixture of experts with modality-agnostic routing consistently outperforms models with modality-aware routing (Figure 11).", + "bbox": [ + 511, + 279, + 903, + 340 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Preliminaries", + "text_level": 1, + "bbox": [ + 511, + 348, + 651, + 364 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Definitions", + "text_level": 1, + "bbox": [ + 511, + 373, + 633, + 388 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Native Multimodal Models (NMMs): Models that are trained from scratch on all modalities simultaneously without relying on pre-trained LLMs or vision encoders. Our focus is on the representative image and text modalities, where the model processes both text and images as input and generates text as output.", + "bbox": [ + 511, + 396, + 903, + 487 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Early fusion: Enabling multimodal interaction from the beginning, using almost no modality-specific parameters (e.g., except a linear layer to patchify images). Using a single transformer model, this approach processes raw multimodal input—tokenized text and continuous image patches—with no image discretization. 
In this paper, we refer to the main transformer as the decoder.", + "bbox": [ + 511, + 487, + 905, + 592 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Late fusion: Delaying the multimodal interaction to deeper layers, typically after separate unimodal components has processed that process each modality independently (e.g., a vision encoder connected to a decoder).", + "bbox": [ + 511, + 594, + 905, + 654 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Modality-agnostic routing: In sparse mixture-of-experts, modality-agnostic routing refers to relying on a learned router module that is trained jointly with the model.", + "bbox": [ + 511, + 655, + 903, + 700 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Modality-aware routing: Routing based on pre-defined rules such as routing based on the modality type (e.g., vision-tokens, token-tokens).", + "bbox": [ + 511, + 700, + 903, + 744 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Scaling Laws", + "text_level": 1, + "bbox": [ + 511, + 757, + 651, + 772 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We aim to understand the scaling properties of NMMs and how different architectural choices influence trade-offs. To this end, we analyze our models within the scaling laws framework proposed by Hoffmann et al. [26], Kaplan et al. [31]. We compute FLOPs based on the total number of parameters, using the approximation $C = 6ND$ , as adopted in prior work [2, 26]. However, we modify this estimation to suit our setup: for late-fusion models, FLOPs is computed", + "bbox": [ + 511, + 779, + 905, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/849e76211de227d7564ee66c874ace735709b77d3978a5168211969bf19288d8.jpg", + "image_caption": [ + "Figure 2. Scaling laws for early-fusion and late-fusion native multimodal models. 
Each point represents a model (300M to 3B parameters) trained on varying number of tokens (250M to 400B). We report the average cross-entropy loss on the validation sets of interleaved (Obelics), Image-caption (HQITP), and text-only data (DCLM)." + ], + "image_footnote": [], + "bbox": [ + 94, + 87, + 475, + 252 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "as $6(N_{v}D_{v} + ND)$ . We consider a setup where, given a compute budget $C$ , our goal is to predict the model's final performance, as well as determine the optimal number of parameters or number of training tokens. Consistent with prior studies on LLM scaling [26], we assume a power-law relationship between the final model loss and both model size $(N)$ and training tokens $(D)$ :", + "bbox": [ + 89, + 351, + 483, + 455 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL = E + \\frac {A}{N ^ {\\alpha}} + \\frac {B}{D ^ {\\beta}}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 460, + 480, + 491 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here, $E$ represents the lowest achievable loss on the dataset, while $\\frac{A}{N^{\\alpha}}$ captures the effect of increasing the number of parameters, where a larger model leads to lower loss, with the rate of improvement governed by $\\alpha$ . Similarly, $\\frac{B}{D^{\\beta}}$ accounts for the benefits of a higher number of tokens, with $\\beta$ determining the rate of improvement. Additionally, we assume a linear relationship between compute budget (FLOPs) and both $N$ and $D$ ( $C \\propto ND$ ). This further leads to power-law relationships detailed in Appendix C.7.", + "bbox": [ + 89, + 491, + 483, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. 
Experimental setup", + "text_level": 1, + "bbox": [ + 89, + 636, + 277, + 652 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our models are based on the autoregressive transformer architecture [71] with SwiGLU FFNs [58] and QK-Norm [17] following Li et al. [39]. In early-fusion models, image patches are linearly projected to match the text token dimension, while late-fusion follows the CLIP architecture [55]. We adopt causal attention for text tokens and bidirectional attention for image tokens, we found this to work better. Training is conducted on a mixture of public and private multimodal datasets, including DCLM [39], Obelics [34], DFN [21], COYO [11], and a private collection of High-Quality Image-Text Pairs (HQITP). Images are resized to $224 \\times 224$ resolution with a $14 \\times 14$ patch size. We use a context length of 1k for the multimodal sequences. For training efficiency, we train our models with bfloat16, Fully Sharded Data Parallel (FSDP) [82], activation checkpointing, and gradient accumulation. We also use se", + "bbox": [ + 89, + 659, + 483, + 900 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/bbdc82534f1740e7817c2ab64a135366e6afbf6047c72c08d9e836e9c3a7a7ee.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
L = E + A/Nα + B/DβN ∝ CaD ∝CbL ∝CcD ∝Nd
ModelDataEαβabcd
GPT3 [10]Text------0.048
Chinchilla [26]Text1.6930.3390.2850.460.54-
NMM (early-fusion)Text2.2220.30840.33750.52460.4774-0.04200.9085
Image-Caption1.5690.31110.33860.52030.4785-0.06100.9187
Interleaved1.9660.29710.3380.53150.4680-0.04590.8791
AVG1.9040.3010.3350.52620.473-0.04920.8987
NMM (late-fusion)AVG1.8910.29030.33830.63580.4619-0.04940.6732
Sparse NMM (early-fusion)AVG2.1580.7100.3720.3610.656-0.0471.797
", + "bbox": [ + 516, + 88, + 903, + 242 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 2. Scaling laws for native multimodal models. We report the scaling laws results for early and late fusion models. We fit the scaling laws for different target data types as well as their average loss (AVG).", + "bbox": [ + 511, + 244, + 903, + 299 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "quence packing for the image captioning dataset to reduce the amount of padded tokens. Similar to previous works [2, 5, 26], we evaluate performance on held-out subsets of interleaved (Obelics), Image-caption (HQITP), and text-only data (DCLM). Further implementation details are provided in Appendix A.", + "bbox": [ + 511, + 305, + 903, + 397 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Scaling native multimodal models", + "text_level": 1, + "bbox": [ + 511, + 412, + 820, + 430 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we present a scaling laws study of native multimodal models, examining various architectural choices § 3.1, exploring different data mixtures § 3.2, analyzing the practical trade-offs between late and early fusion NMMs, and comparing the performance of native pretraining and continual pre-training of NMMs § 3.3.", + "bbox": [ + 511, + 439, + 905, + 530 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Setup. We train models ranging from 0.3B to 4B active parameters, scaling the width while keeping the depth constant. For smaller training token budgets, we reduce the warm-up phase to 1K steps while maintaining 5K steps for larger budgets. Following Hagele et al. [25], models are trained with a constant learning rate, followed by a cooldown phase using an inverse square root scheduler. The cool-down phase spans $20\\%$ of the total steps spent at the constant learning rate. 
To estimate the scaling coefficients in Eq 1, we apply the L-BFGS algorithm [51] and Huber loss [28] (with $\\delta = 10^{-3}$ ), performing a grid search over initialization ranges.", + "bbox": [ + 511, + 531, + 906, + 714 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Scaling laws of NMMs", + "text_level": 1, + "bbox": [ + 511, + 726, + 720, + 742 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Scaling laws for early-fusion and late-fusion models. Figure 2 (left) presents the final loss averaged across interleaved, image-caption, and text datasets for early-fusion NMMs. The lowest-loss frontier follows a power law as a function of FLOPs. Fitting the power law yields the expression $L \\propto C^{-0.049}$ , indicating the rate of improvement with increasing compute. When analyzing the scaling laws per data type (e.g., image-caption, interleaved, text), we observe that the exponent varies (Table 2). For instance, the model achieves a higher rate of improvement for image-", + "bbox": [ + 511, + 750, + 906, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/dc53bc628df5feb99518e025ca084ba7b9428638cf809cc0d413eaca42641103.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 89, + 349, + 242 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/87949d3241155096a62e02bbef0b483bc4abd62f02b7c47214c9da40da229f94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 89, + 609, + 244 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ea002e02fed450556ae2ce5afb3686df9c86c34add67acb163233c6b6af4e322.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 89, + 879, + 244 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4133a9cbf5fb81e279a53a22c25cc8e1a31cad79eb8d7f96cf9c1d0cbaf75ebd.jpg", + 
"image_caption": [ + "Figure 3. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the number of model parameters and the number of training tokens. Overall, early fusion shows a slight advantage, especially at smaller model sizes, and the gap decreases when scaling the number of parameters $N$ ." + ], + "image_footnote": [], + "bbox": [ + 140, + 250, + 854, + 270 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "caption data $(L\\propto C^{-0.061})$ when compared to interleaved documents $(L\\propto C^{-0.046})$", + "bbox": [ + 88, + 340, + 482, + 372 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To model the loss as a function of the number of training tokens $D$ and model parameters $N$ , we fit the parametric function in Eq 1, obtaining scaling exponents $\\alpha = 0.301$ and $\\beta = 0.335$ . These describe the rates of improvement when scaling the number of model parameters and training tokens, respectively. Assuming a linear relationship between compute, $N$ , and $D$ (i.e., $C \\propto ND$ ), we derive the law relating model parameters to the compute budget (see Appendix C for details). Specifically, for a given compute budget $C$ , we compute the corresponding model size $N$ at logarithmically spaced $D$ values and determine $N_{opt}$ , the parameter count that minimizes loss. Repeating this across different FLOPs values produces a dataset of $(C, N_{opt})$ , to which we fit a power law predicting the compute-optimal model size as a function of compute: $N^{*} \\propto C^{0.526}$ .", + "bbox": [ + 89, + 378, + 483, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similarly, we fit power laws to estimate the compute-optimal training dataset size as a function of compute and model size:", + "bbox": [ + 89, + 613, + 483, + 656 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nD _ {o p t} \\propto C ^ {0. 4 7 3}, D _ {o p t} \\propto N ^ {0. 
8 9 9}.\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 657, + 403, + 678 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "These relationships allow practitioners to determine the optimal model and dataset size given a fixed compute budget. When analyzing by data type, we find that interleaved data benefits more from larger models ( $a = 0.532$ ) compared to image_caption data ( $a = 0.520$ ), whereas the opposite trend holds for training tokens.", + "bbox": [ + 89, + 681, + 483, + 773 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We conduct a similar study on late-fusion models in Figure 2 (right) and observe comparable scaling behaviors. In particular, the loss scaling exponent $(c = -0.0494)$ is nearly identical to that of early fusion $(c = -0.0492)$ . This trend is evident in Figure 3, where early fusion outperforms late fusion at smaller model scales, while both architectures converge to similar performance at larger model sizes. We also observe similar trends when varying late-fusion con", + "bbox": [ + 89, + 779, + 483, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e2db9e4bc14538474e23bf3e8a07a771abc4e4bc5ddfb8611c4bfdfe5adf2479.jpg", + "image_caption": [ + "Figure 4. Early vs late: pretraining efficiency. Early-fusion is faster to train and consumes less memory. Models are trained on 16 H100 GPUs for 160k steps (300B tokens)." + ], + "image_footnote": [], + "bbox": [ + 517, + 342, + 707, + 502 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/7f7757b2858d9f840bbcacbd8c46970472f92633800743ce8acb9dab1d537e65.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 712, + 343, + 903, + 502 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "figurations, such as using a smaller vision encoder with a larger text decoder Appendix B.", + "bbox": [ + 511, + 564, + 906, + 595 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Scaling laws of NMMs vs LLMs. 
Upon comparing the scaling law coefficients of our NMMs to those reported for text-only LLMs (e.g., GPT-3, Chinchilla), we find them to be within similar ranges. In particular, for predicting the loss as a function of compute, GPT-3 [10] follows $L \\propto C^{-0.048}$ , while our models follow $L \\propto C^{-0.049}$ , suggesting that the performance of NMMs adheres to similar scaling laws as LLMs. Similarly, our estimates of the $\\alpha$ and $\\beta$ parameters in Eq 1 ( $\\alpha = 0.301$ , $\\beta = 0.335$ ) closely match those reported by Hoffmann et al. [26] ( $\\alpha = 0.339$ , $\\beta = 0.285$ ). Likewise, our computed values of $a = 0.526$ and $b = 0.473$ align closely with $a = 0.46$ and $b = 0.54$ from [26], reinforcing the idea that, for native multimodal models, the number of training tokens and model parameters should be scaled proportionally. However, since the gap between $a$ and $b$ is smaller than in LLMs, this principle holds even more strongly for NMMs. Additionally, as $a = 0.526$ is greater than $b = 0.473$ in our case, the optimal model size for NMMs is larger than that of LLMs,", + "bbox": [ + 511, + 613, + 906, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2c3c0faf8744d893068c96328767e8b3cb6814d50ea95a957ae80e790bdaa99e.jpg", + "image_caption": [ + "Figure 5. Scaling laws with different training mixtures. Early-fusion models follow similar scaling trends when changing the pretraining mixtures. However, increasing the image captions leads to a higher scaling exponent norm (see Table 3)." + ], + "image_footnote": [], + "bbox": [ + 102, + 90, + 897, + 265 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/ecb582a4915e09f0f5d7f795e92882a1d6a26f88d15c6eb5d012aeb43340d06e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
C-I-T (%)I/T ratioEαβabdc
145-45-101.191.9060.3010.3350.5270.4740.901-0.0492
240-20-400.651.9650.3280.3480.5180.4860.937-0.0486
330-30-400.591.8470.2530.3380.5720.4280.748-0.0463
420-40-400.491.8360.2590.3540.5820.4230.726-0.0488
", + "bbox": [ + 93, + 314, + 486, + 383 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 3. Scaling laws for different training mixtures. Early-fusion models. C-I-T refer to image-caption, interleaved and text", + "bbox": [ + 89, + 383, + 483, + 412 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "while the optimal number of training tokens is lower, given a fixed compute budget.", + "bbox": [ + 89, + 417, + 483, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compute-optimal trade-offs for early vs. late fusion NMMs. While late- and early-fusion models reduce loss at similar rates with increasing FLOPs, we observe distinct trade-offs in their compute-optimal models. Specifically, $N_{opt}$ is larger for late-fusion models, whereas $D_{opt}$ is larger for early-fusion models. This indicates that, given a fixed compute budget, late-fusion models require a higher number of parameters, while early-fusion models benefit more from a higher number of training tokens. This trend is also reflected in the lower $\\frac{N_{opt}}{D_{opt}} \\propto C^{0.053}$ for early fusion compared to $\\frac{N_{opt}}{D_{opt}} \\propto C^{0.076}$ for late fusion. As shown in Figure 1 (bottom), when scaling FLOPs, the number of parameters of early fusion models becomes significantly lower, which is crucial for reducing inference costs and, consequently, lowering serving costs after deployment.", + "bbox": [ + 88, + 473, + 483, + 709 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Early-fusion is more efficient to train. We compare the training efficiency of late- and early-fusion architectures. As shown in Figure 4, early-fusion models consume less memory and train faster under the same compute budget. This advantage becomes even more pronounced as compute increases, highlighting the superior training efficiency of early fusion while maintaining comparable performance to late fusion at scale. 
Notably, for the same FLOPs, late-fusion models have a higher parameter count and higher effective depth (i.e., additional vision encoder layers alongside decoder layers) compared to early-fusion models.", + "bbox": [ + 88, + 734, + 483, + 902 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/20c4e076762da2e42468513928ebe646d8ecfb3fe53aae03d0f904e8b15de96b.jpg", + "image_caption": [ + "Figure 7. Early vs late fusion: changing the training mixture. We vary the training mixtures and plot the final training loss. Early fusion models attain a favorable performance when increasing the proportion of interleaved documents and text-only data." + ], + "image_footnote": [], + "bbox": [ + 517, + 316, + 709, + 469 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/30f0b77b47bad4996304ae3eb682ca659d3838eb5fdb97502507841b6fa9a450.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 714, + 316, + 880, + 469 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. Scaling laws for different data mixtures", + "text_level": 1, + "bbox": [ + 511, + 541, + 852, + 556 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We investigate how variations in the training mixture affect the scaling laws of native multimodal models. To this end, we study four different mixtures that reflect common community practices [34, 41, 46, 81], with Image Caption-Interleaved-Text ratios of 45-45-10 (our default setup), 30-30-40, 40-20-40, and 20-40-40. For each mixture, we conduct a separate scaling study by training 76 different models, following our setup in § 3.1. Overall, Figure 5 shows that different mixtures follow similar scaling trends; however, the scaling coefficients vary depending on the mixture (Table 3). Interestingly, increasing the proportion of image-caption data (mixtures 1 and 2) leads to lower $a$ and higher $b$ , whereas increasing the ratio of interleaved and text data (mixtures 3 and 4) have the opposite effect. 
Notably, image-caption data contains more image tokens than text tokens; therefore, increasing its proportion results in more image tokens, while increasing interleaved and text data increases text token counts. This suggests that, when image tokens are prevalent, training for longer decreases the loss faster than increasing the model size. We also found that for a fixed model size, increasing text-only and interleaved data ratio is in favor of early-fusion Figure 7.", + "bbox": [ + 511, + 566, + 908, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/38e56edac0d3be48f9d017b5be0ecba6837647563e6fba8f0f05f1b2c5885b56.jpg", + "image_caption": [ + "Figure 8. Early native vs initializing from LLMs: initializing from pre-trained models and scaling training tokens. We compare training with and without initializing from DCLM-1B." + ], + "image_footnote": [], + "bbox": [ + 96, + 88, + 495, + 251 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3. Native multimodal pre-training vs. continual training of LLMs", + "text_level": 1, + "bbox": [ + 89, + 303, + 483, + 335 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we compare training natively from scratch to continual training after initializing from a pre-trained LLM. We initialize the model from DCLM-1B [21] that is trained on more than 2T tokens. Figure 8 shows that native multimodal models can close the gap with initialized models when trained for longer. Specifically, on image captioning data, the model requires fewer than 100B multimodal tokens to reach comparable performance. However, on interleaved and text data, the model may need longer training—up to 1T tokens. 
Considering the cost of pre-training, these results suggest that training natively could be a more efficient approach for achieving the same performance on multimodal benchmarks.", + "bbox": [ + 88, + 340, + 483, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Towards multimodal specialization", + "text_level": 1, + "bbox": [ + 89, + 556, + 408, + 573 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Previously, we demonstrated that early-fusion models achieve performance on par with late-fusion models under a fixed compute budget. However, multimodal data is inherently heterogeneous, and training a unified model to fit such diverse distributions may be suboptimal. Here, we argue for multimodal specialization within a unified architecture. Ideally, the model should implicitly adapt to each modality, for instance, by learning modality-specific weights or specialized experts. Mixture of Experts is a strong candidate for this approach, having demonstrated effectiveness in LLMs. In this section, we highlight the advantages of sparse early-fusion models over their dense counterparts.", + "bbox": [ + 89, + 582, + 482, + 763 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Setup. Our sparse models are based on the dropless-MoE implementation of Gale et al. [24], which eliminates token dropping during training caused by expert capacity constraints. We employ a top- $k$ expert-choice routing mechanism, where each token selects its top- $k$ experts among the $E$ available experts. Specifically, we set $k = 1$ and $E = 8$ , as we find this configuration to work effectively. Additionally, we incorporate an auxiliary load-balancing loss [59] with a weight of 0.01 to ensure a balanced expert utilization.", + "bbox": [ + 89, + 763, + 482, + 901 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/869818728d380a70e41fbd45b2de162b41240502115758a76d23bbc20a513422.jpg", + "image_caption": [ + "Figure 9. 
Scaling laws for sparse early-fusion NMMs. We report the final validation loss averaged across interleaved, image-captions and text data." + ], + "image_footnote": [], + "bbox": [ + 517, + 85, + 877, + 244 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Following Abnar et al. [2], we compute training FLOPs as $6ND$ , where $N$ represents the number of active parameters.", + "bbox": [ + 511, + 292, + 903, + 325 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Sparse vs dense NMMs when scaling FLOPs", + "text_level": 1, + "bbox": [ + 511, + 333, + 890, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compare sparse MoE models to their dense counterparts by training models with different numbers of active parameters and varying amounts of training tokens. Figure 10 shows that, under the same inference cost (or number of active parameters), MoEs significantly outperform dense models. Interestingly, this performance gap is more pronounced for smaller model sizes. This suggests that MoEs enable models to handle heterogeneous data more effectively and specialize in different modalities. However, as dense models become sufficiently large, the gap between the two architectures gradually closes.", + "bbox": [ + 511, + 354, + 906, + 521 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Scaling laws for sparse early-fusion models", + "text_level": 1, + "bbox": [ + 511, + 530, + 879, + 546 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We train different models (ranging from 300M to 3.4B active parameters) on varying amounts of tokens (ranging from 250M to 600B) and report the final loss in Figure 9. We fit a power law to the convex hull of the lowest loss as a function of compute (FLOPs). Interestingly, the exponent $(-0.048)$ is close to that of dense NMMs $(-0.049)$ , indicating that both architectures scale similarly. 
However, the multiplicative constant is smaller for MoEs (27.086) compared to dense models (29.574), revealing lower loss. Additionally, MoEs require longer training to reach saturation compared to dense models (Appendix C for more details). We also predict the coefficients of Eq 1 by considering $N$ as the number of active parameters. Table 2 shows significantly higher $\\alpha$ compared to dense models. Interestingly, $b$ is significantly higher than $a$ , revealing that the training tokens should be scaled at a higher rate than the number of parameters when training sparse NMMs. We also experiment with a scaling law that takes into account the sparsity [2] and reached similar conclusions in Appendix C.7.", + "bbox": [ + 511, + 551, + 906, + 840 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Modality-aware vs. Modality-agnostic routing", + "text_level": 1, + "bbox": [ + 511, + 848, + 898, + 864 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Another alternative to MoEs is modality-aware routing, where multimodal tokens are assigned to experts based on", + "bbox": [ + 511, + 869, + 903, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3608745d85340830d3714b391c6401e08ae575b7c2858fdf864af7cb9b124f87.jpg", + "image_caption": [ + "Figure 10. MoE vs Dense: scaling training FLOPs. We compare MoE and dense early-fusion models when scaling both the amount of training tokens and model sizes. MoEs beat dense models when matching the number of active parameters." + ], + "image_footnote": [], + "bbox": [ + 96, + 89, + 467, + 281 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "their modalities, similar to previous works [7, 75]. We train models with distinct image and text experts in the form of FFNs, where image tokens are processed only by the image FFN and text tokens only by the text FFN. 
Compared to modality-aware routing, MoEs exhibit significantly better performance on both image-caption and interleaved data as presented in Figure 11.", + "bbox": [ + 89, + 353, + 483, + 459 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Emergence of expert specialization and sharing", + "text_level": 1, + "bbox": [ + 89, + 493, + 483, + 508 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We investigate multimodal specialization in MoE architectures. In Figure 13, we visualize the normalized number of text and image tokens assigned to each expert across layers. To quantify this specialization, we compute a specialization score, defined as the average, across all experts within a layer, of $1 - H(p)$ , where $H$ is the binary entropy of each expert's text/image token distribution. We plot this specialization score in Figure 12. Higher specialization scores indicate a tendency for experts to focus on either text or image tokens, while lower scores indicate a shared behavior. These visualizations provide clear evidence of modality-specific experts, particularly in the early layers. Furthermore, the specialization score decreases as the number of layers increases, before rising again in the last layers. This suggests that early and final layers exhibit higher modality specialization compared to mid-layers. This behavior is intuitive, as middle layers are expected to hold higher-level features that may generalize across modalities, and consistent with findings in [61] that shows increasing alignment between modalities across layers. The emergence of both expert specialization and cross-modality sharing in our modality-agnostic MoE, suggests it may be a preferable approach compared to modality-aware sparsity. 
All data displayed here is from an early-fusion MoE model with 1B active parameters trained for 300B tokens.", + "bbox": [ + 89, + 523, + 483, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ca37c4a7a1a0179f3629d000ae32bcaef96674560f551745f099a33c45e3a31c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
AccuracyCIDEr
AVGVQAv2TextVQAOKVQAGQAVizWizCOCOTextCaps
Late-fusion46.869.425.850.165.822.870.750.9
Early-fusion47.669.328.152.165.423.272.053.8
Early-MoEs48.269.830.052.165.423.669.655.7
", + "bbox": [ + 517, + 90, + 903, + 161 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4. Supervised finetuning on the LLaVA mixture. All models are native at 1.5B scale and pre-trained on 300B tokens.", + "bbox": [ + 513, + 162, + 903, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/df21bd1e747a0e8383b76d796688d703c2e2f383b9eba46b54ccd05bbe718f73.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 205, + 712, + 354 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6f61e1f3f53e3ddd36f12868abec144cf60c2677463e6e9ba5cb4b0e8e6d8985.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 715, + 207, + 885, + 354 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c3de713136a78894e21369ded443a32da1ce3d11b7bf70df27077caf95a41978.jpg", + "image_caption": [ + "Figure 11. Modality-aware vs modality agnostic routing for sparse NMMs. We compare modality-agnostic routing with modality-aware routing when scaling both the amount of training tokens and model sizes." + ], + "image_footnote": [], + "bbox": [ + 516, + 363, + 901, + 393 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5. Evaluation on downstream tasks with SFT", + "text_level": 1, + "bbox": [ + 511, + 465, + 890, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Following previous work on scaling laws, we primarily rely on validation losses. However, we generally find that this evaluation correlates well with performance on downstream tasks. To validate this, we conduct a multimodal instruction tuning stage (SFT) on the LLaVA mixture [43] and report accuracy and CIDEr scores across several VQA and captioning tasks. Table 4 confirms the ranking of different model configurations. Specifically, early fusion outperforms late fusion, and MoEs outperform dense models. 
However, since the models are relatively small (1.5B scale), trained from scratch, and fine-tuned on a small dataset, the overall scores are lower than the current state of the art. Further implementation details can be found in Appendix A.", + "bbox": [ + 511, + 492, + 906, + 689 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6. Related work", + "text_level": 1, + "bbox": [ + 511, + 707, + 650, + 722 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Large multimodal models. A long-standing research goal has been to develop models capable of perceiving the world through multiple modalities, akin to human sensory experience. Recent progress in vision and language processing has shifted the research focus from smaller, task-specific models toward large, generalist models that can handle diverse inputs [29, 67]. Crucially, pre-trained vision and language backbones often require surprisingly little adaptation to enable effective cross-modal communication [32, 47, 62, 68, 69]. Simply integrating a vision encoder with either an encoder-decoder architecture [45, 48, 63, 72]", + "bbox": [ + 511, + 734, + 906, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4a1035b9a18a960ba7e2cd7aca5a49f2e04a75b6df481835ae8d4417804fcb2d.jpg", + "image_caption": [ + "Figure 12. MoE specialization score. Entropy-based image/text specialization score (as described in § 4.4) across layers for two data sources: HQITP and Obelics. HQITP has a more imbalanced image-to-text token distribution, resulting in generally higher specialization. Despite this difference, both data sources exhibit a similar trend: the specialization score decreases in the early layers before increasing again in the final layers." 
+ ], + "image_footnote": [], + "bbox": [ + 102, + 89, + 475, + 239 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "or a decoder-only LLM has yielded highly capable multimodal systems [1, 6, 9, 13, 16, 35, 43, 49, 64, 73, 78, 83]. This late-fusion approach, where modalities are processed separately before being combined, is now well-understood, with established best practices for training effective models [34, 41, 46, 81]. In contrast, early-fusion models [8, 18, 66], which combine modalities at an earlier stage, remain relatively unexplored, with only a limited number of publicly released models [8, 18]. Unlike [18, 66], our models utilize only a single linear layer and rely exclusively on a next-token prediction loss. Furthermore, we train our models from scratch on all modalities without image tokenization.", + "bbox": [ + 89, + 349, + 482, + 532 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Native Multimodal Models. We define native multimodal models as those trained from scratch on all modalities simultaneously [67] rather than adapting LLMs to accommodate additional modalities. Due to the high cost of training such models, they remain relatively underexplored, with most relying on late-fusion architectures [27, 79]. Some multimodal models trained from scratch [4, 66, 76] relax this constraint by utilizing pre-trained image tokenizers such as [20, 70] to convert images into discrete tokens, integrating them into the text vocabulary. This approach enables models to understand and generate text and images, facilitating a more seamless multimodal learning process.", + "bbox": [ + 89, + 535, + 482, + 717 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Scaling laws. Scaling law studies aim to predict how model performance scales with training compute. 
Early works [26, 31] found that LLM performance follows a power-law relationship with compute, enabling the compute-optimal estimation of the number of model parameters and training tokens at scale for a given budget. Similar research has extended these findings to sparse Mixture of Experts (MoE) models, considering factors such as sparsity, number of experts, and routing granularity [15, 33, 74]. Scaling laws have also been observed across various domains, including image models [23], video models [56], protein LLMs [14], and imitation learning [54]. However, few stud", + "bbox": [ + 89, + 719, + 482, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f3ebdfb256272f381112555508c27704cd3e60fe5c08b203fb55b9fc64a1a634.jpg", + "image_caption": [ + "Figure 13. MoE specialization frequency. Percentage of text and image tokens routed to each expert on interleaved data from Obelics. Experts are ordered for better visualization. The first layer shows the highest amount of unimodal experts." + ], + "image_footnote": [], + "bbox": [ + 516, + 89, + 648, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/21e8fdc385d8e3156ac6fa8c201c6a66e47d61ac4cf92ccd51951f6d42c2f9ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 653, + 89, + 774, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5f8f59f429c09150c5fcb8b66558a02dcabcfe38634d5ce492da13a607485231.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 779, + 89, + 897, + 172 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ies have investigated scaling laws for multimodal models. Notably, Aghajanyan et al. [5] examined multimodal models that tokenize modalities into discrete tokens and include multimodal generation. 
In contrast, we focus on studying early-fusion models that take raw multimodal inputs and are trained on interleaved multimodal data.", + "bbox": [ + 511, + 236, + 906, + 325 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Mixture of experts (MoEs). MoEs [59] scale model capacity efficiently by sparsely activating parameters, enabling large models with reduced per-sample compute. While widely studied in LLMs [22, 30, 36, 37, 42, 65, 77, 84], MoEs remain underexplored in multimodal settings. Prior work has examined contrastive models [50], late-fusion LLMs [38, 40], and modality-specific experts [7, 12, 60]. We focus on analyzing MoEs in early-fusion multimodal models.", + "bbox": [ + 511, + 333, + 908, + 469 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Limitations", + "text_level": 1, + "bbox": [ + 513, + 502, + 635, + 517 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our study finds that scaling law coefficients are broadly consistent across training mixtures, though a broader exploration is needed to validate this observation. While validation loss scales predictably with compute, the extent to which this correlates with downstream performance remains unclear and warrants further investigation. The accuracy of scaling law predictions improves with higher FLOPs, but their extrapolation to extreme model sizes is still an open question (Appendix D for more details).", + "bbox": [ + 511, + 534, + 908, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "8. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 702, + 633, + 718 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We explore various strategies for compute-optimal pretraining of native multimodal models. We found the NMMs follow similar scaling laws to those of LLMs. Contrary to common belief, we find no inherent advantage in adopting late-fusion architectures over early-fusion ones. 
While both architectures exhibit similar scaling properties, early-fusion models are more efficient to train and outperform late-fusion models at lower compute budgets. Furthermore, we show that sparse architectures encourage modality-specific specialization, leading to performance improvements while maintaining the same inference cost.", + "bbox": [ + 511, + 734, + 908, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgment", + "text_level": 1, + "bbox": [ + 91, + 90, + 240, + 107 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We thank Philipp Dufter, Samira Abnar, Xiujun Li, Zhe Gan, Alexander Toshev, Yinfei Yang, Dan Busbridge, and Jason Ramapuram for many fruitful discussions. We thank Denise Hui, and Samy Bengio for infra and compute support. Finally, we thank, Louis Bethune, Pierre Ablin, Marco Cuturi, and the MLR team at Apple for their support throughout the project.", + "bbox": [ + 89, + 114, + 483, + 220 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 237, + 187, + 253 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024. 8", + "[2] Samira Abnar, Harshay Shah, Dan Busbridge, Alaaeldin Mohamed Elnouby Ali, Josh Susskind, and Vimal Thilak. Parameters vs flops: Scaling laws for optimal sparsity for mixture-of-experts language models. arXiv preprint arXiv:2501.12370, 2025. 
2, 3, 6, 18, 20", + "[3] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1", + "[4] Armen Aghajanyan, Bernie Huang, Candace Ross, Vladimir Karpukhin, Hu Xu, Naman Goyal, Dmytro Okhonko, Mandar Joshi, Gargi Ghosh, Mike Lewis, et al. Cm3: A causal masked multimodal model of the internet. arXiv preprint arXiv:2201.07520, 2022. 8", + "[5] Armen Aghajanyan, Lili Yu, Alexis Conneau, Wei-Ning Hsu, Karen Hambardzumyan, Susan Zhang, Stephen Roller, Naman Goyal, Omer Levy, and Luke Zettlemoyer. Scaling laws for generative mixed-modal language models. In International Conference on Machine Learning, pages 265-279. PMLR, 2023. 3, 8", + "[6] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. 1, 8", + "[7] Hangbo Bao, Wenhui Wang, Li Dong, Qiang Liu, Owais Khan Mohammed, Kriti Aggarwal, Subhojit Som, and Furu Wei. Vlmo: Unified vision-language pretraining with mixture-of-modality-experts. arXiv preprint arXiv:2111.02358, 2021. 7, 8", + "[8] Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, and Săgnak Taşirlar. Introducing our multimodal models, 2023. 8", + "[9] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024. 
1, 8" + ], + "bbox": [ + 99, + 262, + 483, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[10] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3, 4", + "[11] Minwoo Byeon, Beomhee Park, Haecheon Kim, Sungjun Lee, Woonhyuk Baek, and Saehoon Kim. Coyo-700m: Image-text pair dataset. https://github.com/kakaobrain/coyo-dataset, 2022.3, 13", + "[12] Junyi Chen, Longteng Guo, Jia Sun, Shuai Shao, Zehuan Yuan, Liang Lin, and Dongyu Zhang. Eve: Efficient vision-language pre-training with masked prediction and modality-aware moe. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1110-1119, 2024. 8", + "[13] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 8", + "[14] Xingyi Cheng, Bo Chen, Pan Li, Jing Gong, Jie Tang, and Le Song. Training compute-optimal protein language models. bioRxiv, 2024. 8", + "[15] Aidan Clark, Diego de Las Casas, Aurelia Guy, Arthur Mensch, Michela Paganini, Jordan Hoffmann, Bogdan Damoc, Blake Hechtman, Trevor Cai, Sebastian Borgeaud, et al. Unified scaling laws for routed language models. In International conference on machine learning, pages 4057-4086. PMLR, 2022. 8", + "[16] Wenliang Dai, Nayeon Lee, Boxin Wang, Zhuolin Yang, Zihan Liu, Jon Barker, Tuomas Rintamaki, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. NvIm: Open frontier-class multimodal llms. 
arXiv preprint arXiv:2409.11402, 2024.8", + "[17] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdul-mohsin, et al. Scaling vision transformers to 22 billion parameters. In International Conference on Machine Learning, pages 7480-7512. PMLR, 2023. 3", + "[18] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. arXiv preprint arXiv:2406.11832, 2024.8", + "[19] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 1", + "[20] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12873-12883, 2021. 8", + "[21] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. arXiv preprint arXiv:2309.17425, 2023. 3, 6, 13" + ], + "bbox": [ + 516, + 92, + 903, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[22] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research, 23(120):1-39, 2022. 8", + "[23] Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Béthune, Zhe Gan, Alexander T Toshev, Marcin Eichner, Moin Nabi, Yinfei Yang, Joshua M. Susskind, and Alaaeldin El-Nouby. Multimodal autoregressive pre-training of large vision encoders, 2024. 
1, 8", + "[24] Trevor Gale, Deepak Narayanan, Cliff Young, and Matei Zaharia. Megablocks: Efficient sparse training with mixture-of-experts. Proceedings of Machine Learning and Systems, 5:288-304, 2023. 6", + "[25] Alexander Hagele, Elie Bakouch, Atli Kosson, Loubna Ben Allal, Leandro Von Werra, and Martin Jaggi. Scaling laws and compute-optimal training beyond fixed training durations. arXiv preprint arXiv:2405.18392, 2024. 3", + "[26] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. In Proceedings of the 36th International Conference on Neural Information Processing Systems, pages 30016-30030, 2022. 2, 3, 4, 8, 17", + "[27] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Barun Patra, et al. Language is not all you need: Aligning perception with language models. Advances in Neural Information Processing Systems, 36:72096-72109, 2023. 8", + "[28] Peter J. Huber. Robust Estimation of a Location Parameter, pages 492-518. Springer New York, New York, NY, 1992. 3", + "[29] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 7", + "[30] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. 8", + "[31] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 2, 8, 15", + "[32] Jing Yu Koh, Ruslan Salakhutdinov, and Daniel Fried. 
Grounding language models to images for multimodal inputs and outputs. In International Conference on Machine Learning, pages 17283-17300. PMLR, 2023. 7", + "[33] Jakub Krajewski, Jan Ludziejewski, Kamil Adamczewski, Maciej Pioro, Michal Krutul, Szymon Antoniak, Kamil Ciebiera, Krystian Król, Tomasz Odrzygoźdź, Piotr Sankowski, et al. Scaling laws for fine-grained mixture of experts. arXiv preprint arXiv:2402.07871, 2024. 8, 18" + ], + "bbox": [ + 91, + 90, + 480, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[34] Hugo Laurencon, Lucile Saulnier, Léo Tronchon, Stas Bekman, Amanpreet Singh, Anton Lozhkov, Thomas Wang, Siddharth Karamcheti, Alexander Rush, Douwe Kiela, et al. Obelics: An open web-scale filtered dataset of interleaved image-text documents. Advances in Neural Information Processing Systems, 36, 2024. 3, 5, 8, 13", + "[35] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models? arXiv preprint arXiv:2405.02246, 2024. 1, 8", + "[36] Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan First, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. Gshard: Scaling giant models with conditional computation and automatic sharding. arXiv preprint arXiv:2006.16668, 2020. 8", + "[37] Mike Lewis, Shruti Bhosale, Tim Dettmers, Naman Goyal, and Luke Zettlemoyer. Base layers: Simplifying training of large, sparse models. In International Conference on Machine Learning, pages 6265-6274. PMLR, 2021. 8", + "[38] Dongxu Li, Yudong Liu, Haoning Wu, Yue Wang, Zhiqi Shen, Bowen Qu, Xinyao Niu, Guoyin Wang, Bei Chen, and Junnan Li. Aria: An open multimodal native mixture-of-experts model. arXiv preprint arXiv:2410.05993, 2024. 8", + "[39] Jeffrey Li, Alex Fang, Georgios Smyrnis, Maor Ivgi, Matt Jordan, Samir Gadre, Hritik Bansal, Etash Guha, Sedrick Keh, Kushal Arora, et al. 
Datacomp-lm: In search of the next generation of training sets for language models. arXiv preprint arXiv:2406.11794, 2024. 3, 13, 15", + "[40] Bin Lin, Zhenyu Tang, Yang Ye, Jiaxi Cui, Bin Zhu, Peng Jin, Junwu Zhang, Munan Ning, and Li Yuan. Moe-llava: Mixture of experts for large vision-language models. arXiv preprint arXiv:2401.15947, 2024. 8", + "[41] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. 5, 8", + "[42] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 8", + "[43] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 1, 7, 8", + "[44] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 13", + "[45] Jiasen Lu, Christopher Clark, Rowan Zellers, Roozbeh Mottaghi, and Aniruddha Kembhavi. Unified-io: A unified model for vision, language, and multi-modal tasks. In The Eleventh International Conference on Learning Representations, 2022. 7", + "[46] Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Duffer, Dhruti Shah, Xianzhi Du, Futang Peng, Anton Belyi, et al. Mm1: methods, analysis and insights from multimodal llm pre-training. In European Conference on Computer Vision, pages 304–323. Springer, 2025. 
5, 8, 13" + ], + "bbox": [ + 516, + 90, + 903, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 925, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Jack Merullo, Louis Castricato, Carsten Eickhoff, and Ellie Pavlick. Linearly mapping from image to text space. In *The Eleventh International Conference on Learning Representations*, 2023. 7", + "[48] David Mizrahi, Roman Bachmann, Oguzhan Kar, Teresa Yeo, Mingfei Gao, Afshin Dehghan, and Amir Zamir. 4m: Massively multimodal masked modeling. Advances in Neural Information Processing Systems, 36:58363-58408, 2023. 7", + "[49] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Tushar Nagarajan, Matt Smith, Shashank Jain, Chun-Fu Yeh, Prakash Murugesan, Peyman Heidari, Yue Liu, et al. Anymal: An efficient and scalable any-modality augmented language model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1314-1332, 2024. 8", + "[50] Basil Mustafa, Carlos Riquelme, Joan Puigcerver, Rodolphe Jenatton, and Neil Houlsby. Multimodal contrastive learning with limoe: the language-image mixture of experts. Advances in Neural Information Processing Systems, 35:9564-9576, 2022. 8", + "[51] Jorge Nocedal. Updating quasi newton matrices with limited storage. Mathematics of Computation, 35(151):951-958, 1980. 3", + "[52] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 1", + "[53] Tim Pearce and Jinyeop Song. Reconciling kaplan and chinchilla scaling laws. arXiv preprint arXiv:2406.12907, 2024. 15", + "[54] Tim Pearce, Tabish Rashid, Dave Bignell, Raluca Georgescu, Sam Devlin, and Katja Hofmann. Scaling laws for pre-training agents and world models. 
arXiv preprint arXiv:2411.04434, 2024. 8", + "[55] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 1, 3, 15", + "[56] Jathushan Rajasegaran, Ilija Radosavovic, Rahul Ravishankar, Yossi Gandelsman, Christoph Feichtenhofer, and Jitendra Malik. An empirical study of autoregressive pretraining from videos. arXiv preprint arXiv:2501.05453, 2025.8", + "[57] Kanchana Ranasinghe, Brandon McKinzie, Sachin Ravi, Yinfei Yang, Alexander Toshev, and Jonathon Shlens. Perceptual grouping in contrastive vision-language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5571-5584, 2023. 13", + "[58] Noam Shazeer. Glu variants improve transformer. arXiv preprint arXiv:2002.05202, 2020. 3", + "[59] Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture" + ], + "bbox": [ + 91, + 90, + 480, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "of-experts layer. arXiv preprint arXiv:1701.06538, 2017. 2, 6, 8", + "[60] Sheng Shen, Zhewei Yao, Chunyuan Li, Trevor Darrell, Kurt Keutzer, and Yuxiong He. Scaling vision-language models with sparse mixture of experts. In The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. 8", + "[61] Mustafa Shukor and Matthieu Cord. Implicit multimodal alignment: On the generalization of frozen llms to multimodal inputs. Advances in Neural Information Processing Systems, 37:130848-130886, 2024. 7", + "[62] Mustafa Shukor, Corentin Dancette, and Matthieu Cord. eplalm: Efficient perceptual augmentation of language models. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22056-22069, 2023. 1, 7", + "[63] Mustafa Shukor, Corentin Dancette, Alexandre Rame, and Matthieu Cord. Unival: Unified model for image, video, audio and language tasks. Transactions on Machine Learning Research Journal, 2023. 7", + "[64] Mustafa Shukor, Dana Aubakirova, Francesco Capuano, Pepijn Kooijmans, Steven Palma, Adil Zoutine, Michel Ar-actingi, Caroline Pascal, Martino Russi, Andres Marafioti, et al. Smolvla: A vision-language-action model for affordable and efficient robotics. arXiv preprint arXiv:2506.01844, 2025. 1, 8", + "[65] Xingwu Sun, Yanfeng Chen, Yiqing Huang, Ruobing Xie, Jiaqi Zhu, Kai Zhang, Shuaipeng Li, Zhen Yang, Jonny Han, Xiaobo Shu, et al. Hunyuan-large: An open-source moe model with 52 billion activated parameters by tencent. arXiv preprint arXiv:2411.02265, 2024. 8", + "[66] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 8", + "[67] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 7, 8", + "[68] Maria Tsimpoukelli, Jacob L Menick, Serkan Cabi, SM Eslami, Oriol Vinyals, and Felix Hill. Multimodal few-shot learning with frozen language models. Advances in Neural Information Processing Systems, 34:200-212, 2021. 7", + "[69] Théophane Vallaeys, Mustafa Shukor, Matthieu Cord, and Jakob Verbeek. Improved baselines for data-efficient perceptual augmentation of llms. arXiv preprint arXiv:2403.13499, 2024. 7", + "[70] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2017. 8", + "[71] A Vaswani. Attention is all you need. 
Advances in Neural Information Processing Systems, 2017. 3", + "[72] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International conference on machine learning, pages 23318-23340. PMLR, 2022. 7" + ], + "bbox": [ + 516, + 92, + 903, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[73] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 1, 8", + "[74] Siqi Wang, Zhengyu Chen, Bei Li, Keqing He, Min Zhang, and Jingang Wang. Scaling laws across model architectures: A comparative analysis of dense and MoE models in large language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 5583-5595, Miami, Florida, USA, 2024. Association for Computational Linguistics. 8, 18", + "[75] Wenhui Wang, Hangbo Bao, Li Dong, Johan Bjorck, Zhiliang Peng, Qiang Liu, Kriti Aggarwal, Owais Khan Mohammed, Saksham Singhal, Subhojit Som, et al. Image as a foreign language: Beit pretraining for all vision and vision-language tasks. arXiv preprint arXiv:2208.10442, 2022. 7", + "[76] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 8", + "[77] Tianwen Wei, Bo Zhu, Liang Zhao, Cheng Cheng, Biye Li, Weiwei Lu, Peng Cheng, Jianhao Zhang, Xiaoyu Zhang, Liang Zeng, et al. 
Skywork-moe: A deep dive into training techniques for mixture-of-experts language models. arXiv preprint arXiv:2406.06563, 2024.8", + "[78] Le Xue, Manli Shu, Anas Awadalla, Jun Wang, An Yan, Senthil Purushwalkam, Honglu Zhou, Viraj Prabhu, Yutong Dai, Michael S Ryoo, et al. xgen-mm (blip-3): A family of open large multimodal models. arXiv preprint arXiv:2408.08872, 2024. 1, 8", + "[79] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 8", + "[80] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 1", + "[81] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Duffer, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1. 5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 5, 8", + "[82] Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023. 3", + "[83] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. In The Twelfth International Conference on Learning Representations, 2024. 8", + "[84] Barret Zoph, Irwan Bello, Sameer Kumar, Nan Du, Yanping Huang, Jeff Dean, Noam Shazeer, and William Fedus. St-moe: Designing stable and transferable sparse expert models. arXiv preprint arXiv:2202.08906, 2022. 
8" + ], + "bbox": [ + 91, + 92, + 480, + 898 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 506, + 935 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Scaling Laws for Native Multimodal Models Supplementary Material", + "text_level": 1, + "bbox": [ + 274, + 85, + 722, + 138 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This supplementary material is organized as follows:", + "bbox": [ + 89, + 156, + 439, + 172 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Appendix A: contains the implementation details and the hyperparameters used to train our models.", + "- Appendix B: contains detailed comparison between early and late fusion models.", + "- Appendix C: contains more details about scaling laws derivation, evaluation and additional results.", + "- Appendix D: contains discussion about the paper limitations.", + "- Appendix E: contains more results about MoEs and modality specialization." + ], + "bbox": [ + 89, + 195, + 482, + 345 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A. Experimental setup", + "text_level": 1, + "bbox": [ + 89, + 395, + 282, + 412 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In Table 6, we show the pre-training hyperparameters for different model configurations used to derive the scaling laws. The number of parameters ranges from 275M to 3.7B, with model width increasing accordingly, while the depth remains fixed at 24 layers. Learning rates vary by model size, decreasing as the model scales up. Based on empirical experiments and estimates similar to [46], we found these values to be effective in our setup. Training is optimized using a fully decoupled AdamW optimizer with momentum values $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.95$ , and a weight decay of $1\\mathrm{e} - 4$ . The batch size is set to 2k samples, which account for 2M tokens, given 1k context length. 
Gradient clipping is set to 1.0, with a maximum warmup duration of 5k iterations, adjusted for shorter training runs: 1k and 2.5k warmup steps for models trained between 1k-4k and 5k-15k steps, respectively. For MoEs, we found that longer warmup is significantly better, so we adopt a 2.5k warmup for all runs under 20k steps. We use a constant learning rate schedule with cooldown during the final $20\\%$ of training, gradually reducing to zero following an inverse square root schedule. For vision processing, image inputs are divided into (14, 14) patches, with augmentations including Random Resized Crop (resizing images to 224px with a scale range of [0.4, 1.0]) and Random Horizontal Flip with a probability of 0.5. We train our models on mixture of interleaved, image captions and text only data Table 5. For late fusion models, we found that using smaller learning rate for the vision encoder significantly boost the performance Table 8, and when both the encoder and decoder are initialized (Appendix B.7) we found that freezing the vision encoder works best Table 7.", + "bbox": [ + 89, + 431, + 483, + 898 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/29d9735919725580929983c6cf0f1e57af47d8b28095af285fee7f7e08e14bfc.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Data typedataset#samplessampling prob.
DFN [21]2B27%
Image-CaptionCOYO [11]600M11.25%
HQITP [57]400M6.75%
InterleavedObelics [34]141M Docs45%
TextDCLM [39]6.6T Toks10%
", + "bbox": [ + 517, + 154, + 903, + 224 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/5c5d809252a6557e3554685ef21f3f3bb5c397b4474746975fe77434e16c52b3.jpg", + "table_caption": [ + "Table 5. Pre-training data mixture. Unless otherwise specified, the training mixture contains $45\\%$ , $45\\%$ and $10\\%$ of image captions, interleaved documents and text-only data." + ], + "table_footnote": [], + "table_body": "
Early-fusion
Params275M468M932M1.63B2.28B3.35B
width80010881632220826243232
depth24
Learning rate1.5e-31.5e-35e-44.2e-44e-43.5e-4
Late-fusion
Params289M494M1B1.75B2.43B3.7B
vision encoder width384512768102411841536
vision encoder depth24
width76810241536204824643072
depth24
Learning rate1.5e-31.5e-35e-44.2e-43.8e-43.3e-4
Early-fusion MoEs
Active Params275M468M932M1.63B2.28B3.35B
width80010881632220826243232
depth24
Learning rate1.5e-31.5e-35e-44.2e-44e-43.5e-4
Training tokens2.5B-600B
OptimizerFully decoupled AdamW [44]
Optimizer Momentumβ1=0.9, β2=0.95
Minimum Learning rate0
Weight decay1e-4
Batch size2k
Patch size(14, 14)
Gradient clipping1.0
Maximum Warmup iterations5k
Augmentations: \nRandomResizedCrop \nsize224px
scale[0.4, 1.0]
RandomHorizontalFlipp=0.5
", + "bbox": [ + 516, + 292, + 903, + 664 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/bd11f503cbb83405e5763b94f0bf05d0647d5566a363956255066979df79ee25.jpg", + "table_caption": [ + "Table 6. Pre-training hyperparameters We detail the hyperparameters used for pre-training different model configurations to derive scaling laws." + ], + "table_footnote": [], + "table_body": "
Vision encoder\nlr schedulerInterleaved\n(CE)Image-Caption\n(CE)Text\n(CE)AVG\n(CE)AVG (SFT)\n(Acc)
12.5212.152.8672.51343.49
0.12.5022.0662.8622.47752.27
0.012.5022.0662.8592.47653.76
0.0012.5132.0662.8572.479-
0 (frozen)2.5042.0612.8562.47454.14
", + "bbox": [ + 537, + 750, + 885, + 849 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 7. Vision encoder scalar. Freezing the vision encoder works best when initializing late-fusion models with pre-trained models.", + "bbox": [ + 511, + 851, + 906, + 891 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/e1045b0eee72b7e00af10c12fc407f2a2b374da404a7ef29ecfb2de71a4c8ab8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 89, + 364, + 242 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/48ddff1697fd13cb0e2777d4878ca97b8b1c84373b2fd14b4c89423c97881f5b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 89, + 617, + 242 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/768487d1cb2c067b0f6e62f53328b62a8037bbd733461c754d3ae55f1b154cd2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 90, + 888, + 242 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/1249bd8d2556f58d1f41ad80f0ef011fad2d11b1c4b68a40881aad72f6f4d8f5.jpg", + "image_caption": [ + "Figure 14. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the model size and the number of training tokens. The gap decreases mainly due to scaling models size." + ], + "image_footnote": [], + "bbox": [ + 238, + 248, + 759, + 270 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/94e560b7e79f3313be19f00c8abc755758f179ac42e21d3c6cdac2d3d494893e.jpg", + "image_caption": [ + "Figure 15. Early vs late fusion: changing the training mixture. We vary the training mixtures and plot the final training loss. Early fusion models become better when increasing the proportion of interleaved documents. Early and late fusion has 1.63B and 1.75B parameters respectively." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 327, + 367, + 478 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/35b33667f30401245922f03ec33eabdee73856313eeb1047a34b45938190086e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 328, + 624, + 477 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/81e7cde88d0f03bd3d1762a769bf80c9cf66016126c5cafb8103a4885b3eefb2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 347, + 893, + 477 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/151c20de476ef01f160a8055c4f9468eef0286edcc9f11c199662da996e79625.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Vision encoder lrScalerInterleaved (CE)Image-Caption (CE)Text (CE)AVG (CE)AVG (SFT) (Acc)
0.12.6742.2193.0722.65534.84
0.012.6722.1973.0712.64738.77
0.0012.6742.2183.0732.65538.46
", + "bbox": [ + 112, + 575, + 464, + 648 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 8. Vision encoder scalar. Reducing the learning rate for the vision encoder is better when training late-fusion models from scratch.", + "bbox": [ + 89, + 648, + 482, + 691 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B. Late vs early fusion", + "text_level": 1, + "bbox": [ + 89, + 695, + 284, + 713 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This section provides additional comparison between early and late fusion models.", + "bbox": [ + 89, + 720, + 482, + 750 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.1. Scaling FLOPs", + "text_level": 1, + "bbox": [ + 89, + 757, + 246, + 773 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Figure 14 compares early-fusion and late-fusion models when scaling FLOPs. Specifically, for each model size, we train multiple models using different amounts of training tokens. The performance gap between the two approaches mainly decreases due to increasing model sizes rather than increasing the number of training tokens. Despite the decreasing gap, across all the models that we train, early-fusion consistently outperform late-fusion.", + "bbox": [ + 89, + 779, + 483, + 902 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.2. Changing the training data mixture", + "text_level": 1, + "bbox": [ + 511, + 577, + 826, + 593 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We analyze how the performance gap between early and late fusion models changes with variations in the training data mixture. As shown in Figure 16 and Figure 15, when fixing the model size, increasing the ratio of text and interleaved data favors early fusion. Interestingly, the gap remains largely unchanged for other data types. We also observe interference effects between different data types. 
Specifically, increasing the amount of interleaved data negatively impacts performance on image captions and vice versa. Additionally, increasing the proportion of text-only data slightly improves interleaved performance but increases loss on image captions. Overall, we find that text-only and interleaved data are correlated across different setups.", + "bbox": [ + 511, + 599, + 906, + 796 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.3. Scaling image resolution is in favor of early-fusion", + "text_level": 1, + "bbox": [ + 511, + 801, + 906, + 832 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We examine how both architectures perform with varying image resolution. We fix the number of model parameters to 1.63B and 1.75B for early and late fusion respectively. All models are trained for 100K steps or 200B tokens. Since", + "bbox": [ + 511, + 839, + 908, + 902 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/5d510b88dd42b8ddd6e4d6b000a38f3524b9b76ce933ceb6ce23cf3dbff52932.jpg", + "image_caption": [ + "Figure 16. Early vs late fusion: changing the amount of text-only data in the training mixture (isoFLOPs). We vary the ratio of text-only data and plot the final training loss. The gap increases with the text data ratio in favor of early fusion model. Early fusion has 1.63B parameters and late fusion 1.75B parameters." 
+ ], + "image_footnote": [], + "bbox": [ + 96, + 112, + 356, + 242 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/4940c0e3eb2f8c85622a677039121632355bd85038b5913c73fa6ec971513244.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 112, + 625, + 242 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/017cd4e44d6041fbf5d92d6449f51dc545d6432fffd337fa70b94d0dc159c6e0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 111, + 888, + 242 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/3158294d4bed882c5795c15d51f30187e0425323247a897fc1f73162cbb2ca5e.jpg", + "image_caption": [ + "Figure 17. Early vs late fusion: training with different image resolutions (isoFLOPs). For the same training FLOPs we vary the image resolution (and thus the number of image tokens) during training and report the final training loss. Increasing resolution, hurts the performance on text and interleaved documents, while helping image captioning. The gap stays almost the same on text and interleaved data while slightly increase on image captioning in favor of early fusion." + ], + "image_footnote": [], + "bbox": [ + 94, + 340, + 289, + 487 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/9be0f1f4378cc87d81924a109805bfd368af57358e8a69b7dd0cb84c5d2d293b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 290, + 340, + 470, + 487 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "the patch size remains constant, increasing the resolution results in a higher number of visual tokens. For all resolutions, we maintain the same number of text tokens. As shown in Figure 17, the early-fusion model consistently outperforms the late-fusion model across resolutions, particularly for multimodal data, with the performance gap widening at higher resolutions. 
Additionally, we observe that the loss on text and interleaved data increases as resolution increases.", + "bbox": [ + 89, + 614, + 482, + 750 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.4. Early-fusion is consistently better when matching the late-fusion model size", + "text_level": 1, + "bbox": [ + 89, + 757, + 482, + 789 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this section, we compare the late-fusion model with different configurations of early-fusion one. Specifically, we train early-fusion models that match the late-fusion model in total parameters (Params), text model size (Text), and FLOPs (FLOPs), assuming 45-45-10 training mixture. As shown in Figure 18, early fusion consistently outperforms late fusion when normalized by total parameters, followed", + "bbox": [ + 89, + 794, + 482, + 900 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "by normalization by FLOPs. When matching the text model size, early fusion performs better at higher ratios of interleaved data.", + "bbox": [ + 511, + 342, + 905, + 387 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.5. Different late-fusion configuration", + "text_level": 1, + "bbox": [ + 511, + 396, + 815, + 412 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We examine how this scaling changes with different late-fusion configurations. Instead of scaling both the vision and text models equally, as done in the main paper, we fix the vision encoder size to 300M and scale only the text model. Figure 19 shows that late-fusion models lag behind at smaller model sizes, with the gap closing significantly as the text model scales. This suggests that allocating more parameters to shared components is more beneficial, further supporting the choice of early-fusion models.", + "bbox": [ + 511, + 417, + 906, + 554 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.6. 
Different context lengths", + "text_level": 1, + "bbox": [ + 511, + 561, + 741, + 578 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In the paper, we use a 1k context length following [31]. Also following, this paper, we ignore the context length effect, as the model dimension dominates the training compute estimate. Moreover, [53] empirically found that scaling coefficients are robust to context length. Nevertheless, Our initial experiments (Figure 20) indicate that scaling the context length did not significantly affect the comparison between late and early fusion.", + "bbox": [ + 511, + 583, + 905, + 705 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.7. Initializing from LLM and CLIP", + "text_level": 1, + "bbox": [ + 511, + 712, + 805, + 728 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We study the case where both late and early fusion models are initialized from pre-trained models, specifically DCLM-1B [39] and CLIP-ViT-L [55] for late fusion. Interestingly, Figure 21 shows that for text and interleaved multimodal documents, early fusion can match the performance of late fusion when trained for longer. However, closing the gap on image caption data remains more challenging. 
Notably, when considering the overall training cost, including that of pre-trained models, early fusion requires significantly longer training to compensate for the vision encoder's pretraining cost.", + "bbox": [ + 511, + 734, + 906, + 900 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/bef8ca3878711b6a138e83a9bfee1c56c489545a02ac647bb89e7d1bbb3a12e6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 106, + 89, + 354, + 262 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/f92c5996032cc044a4a3f0ed2777722f2dbfee317e184cb6745575c5c7f4f5ec.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 88, + 647, + 261 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/8b99abece7cc6b38f661c87c0bc8d05b6d13d113129c2d0fb0fa3e7c2fcb5155.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 656, + 85, + 880, + 263 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/1f84b67b5d7cfa4cb69e5df7625489543ef35e1179c8f78db4afa37ad99ee036.jpg", + "image_caption": [ + "Figure 18. Early vs late fusion: changing the training mixture and early-fusion configuration. We vary the training mixtures and plot the final training loss for different configuration of early fusion models. For the same number of total parameters early fusion consistently outperform late fusion." 
+ ], + "image_footnote": [], + "bbox": [ + 321, + 263, + 674, + 286 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b0edcfb8489751a58c88dfeb30b60fd61492d6bfaf25d9ea5df642aa01489397.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 349, + 354, + 541 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/de31258d30cda455adcab7e3e5a0b3e5d9d53d61358d4cde8f2e527f429a9010.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 352, + 625, + 542 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/4dde252ff0946067af23011ac2b4db6aa4011b6c34fbfbc6549057a599da712d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 352, + 897, + 542 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/9dbd382a1fce940ad7eb236a4d73fcae11b11afb2586a2dd0f48ae744b045239.jpg", + "image_caption": [ + "Figure 19. Early vs late fusion: scaling training FLOPs while fixing the vision encoder size. We compare early and late fusion models when scaling both the amount of training tokens and model sizes. For late fusion mdoels, we fix the vision encoder size (300M) and scale the text model (250M, 834M, 2B, 3B). The gap between early and late get tighter when scaling the text model." + ], + "image_footnote": [], + "bbox": [ + 263, + 542, + 735, + 580 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/10dbbf3ec8073a52df28e88684bc9882d3a1ce36b6a9552bc7fb858ad7c9543e.jpg", + "image_caption": [ + "Figure 20. Early vs late fusion with different context lengths." + ], + "image_footnote": [], + "bbox": [ + 153, + 667, + 464, + 845 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/af197f1feb50d444ba04fef9c57e806bc731f5d9db518edfb3c47371741f6600.jpg", + "image_caption": [ + "Figure 21. Early vs late fusion when initializing the encoder and decoder. Early-fusion can match the performance of late-fusion models when trained for longer. 
However, the gap is bigger on image-caption data." + ], + "image_footnote": [], + "bbox": [ + 522, + 671, + 908, + 811 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C. Scaling laws", + "text_level": 1, + "bbox": [ + 89, + 89, + 225, + 107 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.1. Fitting $L = F(N,D)$", + "text_level": 1, + "bbox": [ + 89, + 113, + 282, + 131 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Following [26], we determine the parameters that minimize the following objective across all our runs $i$ :", + "bbox": [ + 89, + 136, + 482, + 167 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {a, b, e, \\alpha , \\beta} \\sum_ {i} \\operatorname {H u b e r} _ {\\delta} \\left(\\operatorname {L S E} \\left(a - \\alpha \\log N _ {i}, b - \\beta \\log D _ {i}, e\\right) - \\log L _ {i}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 93, + 172, + 482, + 209 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We perform this optimization across various initialization ranges and select the parameters that achieve the lowest loss across all initializations. Specifically, our grid search spans $\\{0, 0.5, 2.5\\}$ for $\\alpha$ and $\\beta$ , $\\{0, 5, 10, \\dots, 30\\}$ for $a$ and $b$ , and $\\{-1, -0.5, 1, 0.5\\}$ for $e$ . We use the L-BFGS algorithm with $\\delta = 1e - 3$ .", + "bbox": [ + 89, + 210, + 483, + 300 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.2. 
Fitting $N \\propto C^{a}, D \\propto C^{b}, D \\propto N^{d}$", + "text_level": 1, + "bbox": [ + 89, + 310, + 377, + 327 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "While these equations have a closed-form solution [26] for early-fusion models that can be derived from Eq 1, this is not the case for late-fusion models without specifying either the vision encoder or text model size. To ensure a fair comparison, we derive these equations for both models, by performing linear regression in log space. We found that the regression is very close to the coefficient found with closed-form derivation Table 9. For instance, to derive $N = K_{a}C^{a}$ , given a FLOP budget $C$ and a set of linearly spaced tokens $D_{i}$ ranging from 10B to 600B, we compute the model size for each $D_{i}$ as $N_{i} = \\frac{C}{6D}$ for early fusion and $N_{i} = \\frac{C}{6D} + 0.483 * N_{v}$ for late fusion (for the 45-45-10 mixture, $D_{v} = 0.544D$ , thus $C = 6D(0.544N_{v} + N_{t})$ ). We then apply Eq 1 to obtain the loss for each model size and select $N$ that has the minimum loss. We repeat this for all FLOP values corresponding to our runs, resulting in a set of points $(C, N_{opt})$ that we use to regress $a$ and $K_{a}$ . We follow a similar procedure to find $b$ and $d$ . For late-fusion models, we regress a linear model to determine $N_{v}$ given $N$ . Notably, even though we maintain a fixed width ratio for late-fusion models, this approach is more accurate, as embedding layers prevent a strictly fixed ratio between text and vision model sizes. We present the regression results in Figure 22.", + "bbox": [ + 89, + 332, + 483, + 696 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/675dbf1f4b9b9be1ca72b30b1a7af76972f55649829f8678333c939553f2b746.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Modelabdndn
Closed form0.526490.473510.899381.11188-0.05298
Regression0.523910.475340.900521.10224-0.04933
", + "bbox": [ + 114, + 710, + 464, + 762 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 9. Scaling laws parameters for early-fusion. Doing regression to derive the scaling laws coefficients leads to very close results to using the closed-form solution.", + "bbox": [ + 89, + 762, + 482, + 805 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.3. Fitting $L \\propto C^c$", + "text_level": 1, + "bbox": [ + 89, + 832, + 241, + 849 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "To determine the relationship between the final model loss and the compute budget $C$ , we begin by interpolating the points corresponding to the same model size and compute", + "bbox": [ + 89, + 854, + 483, + 901 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "the convex hull that covers the minimum loss achieved by all runs for each FLOP. This results in a continuous mapping from the FLOPs to the lowest loss. We consider a range of FLOPs, excluding very small values $(\\leq 3e^{19})$ , and construct a dataset of $(C,L)$ for linearly spaced compute $C$ . Using this data, we find the linear relationship between $L$ and $C$ in the log space and deduce the exponent $c$ . 
We visualize the results in Figure 26.", + "bbox": [ + 511, + 90, + 906, + 212 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/163a2ab479abcc17e9353749f796560b4c9b3868903e73a7bd4261351ac385ac.jpg", + "image_caption": [ + "C" + ], + "image_footnote": [], + "bbox": [ + 517, + 229, + 705, + 328 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/946d07662bb75c3af1e430bcb2e71945e0fc9b93f3c8ea8c090478e36a9ee523.jpg", + "image_caption": [ + "C" + ], + "image_footnote": [], + "bbox": [ + 714, + 228, + 890, + 328 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/8357b1f7cf079f4433ace33ae357a8cce5c5bbe3741aa00985d510c614ec3825.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 347, + 715, + 446 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/45331be5dffd8bf1906423e01879dee5d3a816ec501329e152690ebd1e24e59f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 715, + 347, + 888, + 446 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/130ab8d89bfbb0daec936aeeb6ae15603aaee2a3b5a736c530f7a7e8d6b259fe.jpg", + "image_caption": [ + "C", + "C" + ], + "image_footnote": [], + "bbox": [ + 517, + 460, + 745, + 559 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/8eeca22c13a880c372f86b1bd6d50e828536005a351d735514d7e9130695399e.jpg", + "image_caption": [ + "C", + "C", + "Figure 22. Regression results of the scaling laws coefficients. our estimation of the scaling coefficients is close to the closed form solution." + ], + "image_footnote": [], + "bbox": [ + 746, + 460, + 895, + 559 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.4. Scaling laws for different target data type", + "text_level": 1, + "bbox": [ + 511, + 666, + 872, + 683 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In Figure 27, we derive the scaling laws for different target data types. 
In general, we observe that the model learns image captioning faster than interleaved data, as indicated by the higher absolute value of the scaling exponent (e.g., 0.062 vs 0.046), despite using the same data ratio for captioning and interleaved data (45% each). Additionally, we find that the model learns more slowly on text-only data, likely due to the smaller amount of text-only data (10%). Across model configurations, we find that early fusion scales similarly to late fusion on image captioning but has a lower multiplicative constant (49.99 vs 47.97). For MoEs, the model learns faster but exhibits a higher multiplicative constant. On text and interleaved data, early and late fusion models scale similarly and achieve comparable", + "bbox": [ + 511, + 688, + 906, + 900 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/05809573792c53ca70b02f70c994411ae668a3b5b2344a38b3d284b30400e43d.jpg", + "image_caption": [ + "Figure 23. Observed vs predicted loss. We visualize the loss predicted by our scaling laws (Eq 1) and the actual loss achieved by each run." + ], + "image_footnote": [], + "bbox": [ + 135, + 85, + 509, + 324 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/eae4c7c1f75345ab401e7eb770895d809db3271eff8cce1157ab801bf3947c98.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 85, + 890, + 325 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "performance. However, MoEs demonstrate better overall performance while learning slightly more slowly.", + "bbox": [ + 89, + 386, + 482, + 417 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.5. 
Scaling laws for different training mixtures", + "text_level": 1, + "bbox": [ + 89, + 429, + 462, + 445 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We investigate how the scaling laws change when modifying the training mixtures. Specifically, we vary the ratio of image caption, interleaved, and text-only data and report the results in Figure 28. Overall, we observe similar scaling trends, with only minor changes in the scaling coefficients. Upon closer analysis, we find that increasing the ratio of a particular data type in the training mixture, leads to a corresponding increase in its scaling exponent. For instance, increasing the ratio of image captions from $30\\%$ to $40\\%$ raises the absolute value of the exponent from 0.056 to 0.061. However, for text-only data, we do not observe significant changes in the scaling coefficients when varying its proportion in the training mixture.", + "bbox": [ + 88, + 452, + 482, + 650 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/62cf5e7b98b47d2792dc6b7fe326f249af87efb09ffa6f45b0a87b47d5481909.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ParameterMSER2MAE (%)
Held-in0.00290.98070.8608
Held-out0.00040.96820.5530
", + "bbox": [ + 153, + 666, + 424, + 718 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/c108eee9a2065d7228436e8d9b0fa0023a328984cbb26e12f2be985be92453a1.jpg", + "table_caption": [ + "Table 10. Scaling laws prediction errors. We report the mean square error, R2 and mean absolute error for the loss prediction for held-in and held-out (8B model) data." + ], + "table_footnote": [], + "table_body": "
ModelEαβabd
Avg1.809220.298420.332090.543020.483010.92375
Std0.338110.101010.028920.088130.057870.23296
", + "bbox": [ + 114, + 800, + 464, + 848 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 11. Scaling laws sensitivity. We report the mean and standard deviation after bootstrapping with 100 iterations.", + "bbox": [ + 89, + 849, + 482, + 878 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.6. Scaling laws evaluation", + "text_level": 1, + "bbox": [ + 513, + 385, + 732, + 400 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For each model size and number of training tokens, we compute the loss using the estimated functional form in Eq 1 and compare it to the actual loss observed in our runs. Figure 23, Figure 24, and Table 10 visualizes these comparisons, showing that our estimation is highly accurate, particularly for lower loss values and larger FLOPs. We also assess our scaling laws in an extrapolation setting, predicting performance beyond the model sizes used for fitting. Notably, our approach estimates the performance of an 8B model with reasonable accuracy.", + "bbox": [ + 511, + 407, + 906, + 559 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Additionally, we conduct a sensitivity analysis using bootstrapping. Specifically, we sample $P$ points with replacement ( $P$ being the total number of trained models) and re-estimate the scaling law coefficients. This process is repeated 100 times, and we report the mean and standard deviation of each coefficient. Table 11 shows that our estimation is more precise for $\\beta$ than for $\\alpha$ , primarily due to the smaller number of model sizes relative to the number of different token counts used to derive the scaling laws.", + "bbox": [ + 511, + 561, + 908, + 699 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.7. 
Scaling laws for sparse NMMs.", + "text_level": 1, + "bbox": [ + 511, + 709, + 790, + 726 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Similar to dense models, we fit a parametric loss function (Eq 1) to predict the loss of sparse NMMs based on the number of parameters and training tokens, replacing the total parameter count with the number of active parameters. While incorporating sparsity is standard when deriving scaling laws for MoEs [2, 33, 74], we focus on deriving scaling laws specific to the sparsity level used in our MoE setup. This yields coefficients that are implicitly conditioned on the sparsity configuration.", + "bbox": [ + 511, + 732, + 906, + 869 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We also experiment with a sparsity-aware formulation of the scaling law as proposed in [2], and observe consistent", + "bbox": [ + 511, + 869, + 908, + 902 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/a40ce810c6407918e90c23eeda423059a15b50810c13f19580f13fa38ea33ab6.jpg", + "image_caption": [ + "Figure 24. Observed vs predicted loss. We visualize the loss predicted by our scaling laws Eq 1 and the actual loss achieved by each run. We can reliably predict the performance of models larger (8B params) than those used to fit the scaling laws." + ], + "image_footnote": [], + "bbox": [ + 107, + 85, + 472, + 316 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "trends (Table 12). In particular, the exponents associated with model size $(N)$ are substantially larger than those for training tokens $(\\beta)$ , reinforcing the importance of scaling model size in sparse architectures. Additionally, we observe that the terms governing the scaling of active parameters decompose into two components.", + "bbox": [ + 89, + 382, + 483, + 474 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D. 
Discussion and Limitations", + "text_level": 1, + "bbox": [ + 89, + 491, + 346, + 508 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Scaling laws for multimodal data mixtures. Our scaling laws study spans different model configurations and training mixtures. While results suggest that the scaling law coefficients remain largely consistent across mixtures, a broader exploration of mixture variations is needed to validate this observation and establish a unified scaling law that accounts for this factor.", + "bbox": [ + 89, + 518, + 483, + 625 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Scaling laws and performance on downstream tasks. Similar to previous scaling law studies, our analysis focuses on pretraining performance as measured by the validation loss. However, the extent to which these findings translate to downstream performance remains an open question and requires further investigation.", + "bbox": [ + 89, + 626, + 482, + 715 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Extrapolation to larger scales. The accuracy of scaling law predictions improves with increasing FLOPs Appendix C. Furthermore, we validate our laws when extrapolating to larger model sizes (Appendix C.6). However, whether these laws can be reliably extrapolated to extremely large model sizes remains an open question.", + "bbox": [ + 89, + 717, + 482, + 808 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "High resolution and early-fusion models. Training early-fusion models with high-resolution inputs leads to a significant increase in vision tokens. While pooling techniques have been widely adopted for late-fusion models, alternative approaches may be necessary for early fusion. 
Given the similarity of early-fusion models to LLMs, it appears", + "bbox": [ + 89, + 810, + 483, + 902 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "that techniques for extending context length could be beneficial.", + "bbox": [ + 511, + 90, + 903, + 119 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Scaling laws for multimodal MoEs models. For MoEs, we consider only a single configuration (top-1 routing with 8 experts). We found this configuration to work reasonably well in our setup, and follow a standard MoEs implementation. However, the findings may vary when optimizing more the MoE architecture or exploring different load-balancing, routing strategies or different experts implementations.", + "bbox": [ + 511, + 119, + 906, + 242 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E. Mixture of experts and modality-specific specialization", + "text_level": 1, + "bbox": [ + 511, + 255, + 903, + 290 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E.1. MoEs configuration", + "text_level": 1, + "bbox": [ + 511, + 297, + 705, + 313 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We experiment with different MoEs configuration by changing the number of experts and the top-k. We report a sample of these experiments in Table 13.", + "bbox": [ + 511, + 319, + 906, + 367 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E.2. MoEs specialization", + "text_level": 1, + "bbox": [ + 513, + 375, + 705, + 390 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/0457adae4a061cd434da8b673b2881180bd66aaec1951b226848f1504508a81e.jpg", + "image_caption": [ + "Figure 25. Modality-specific specialization. We visualize the experts specialization to text and image modalities. Models are evaluated on Obelics." + ], + "image_footnote": [], + "bbox": [ + 576, + 411, + 844, + 563 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We investigate multimodal specialization in MoE architectures. 
We compute a specialization score as the average difference between the number of text/images tokens assigned to each expert and a uniform assignment $(1 / E)$ . Additionally, we visualize the normalized number of text and image tokens assigned to each expert across layers. Figure 25 shows clear modality-specific experts, particularly in the early layers. Furthermore, the specialization score decreases as the number of layers increases but rises again in the very last layers. This suggests that early and final layers require more modality specialization compared to mid-layers. Additionally, we observe several experts shared between text and image modalities, a phenomenon not present in hard-routed or predefined modality-specific experts.", + "bbox": [ + 511, + 628, + 908, + 840 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/045203ceb8b4e4655a60480c74ed6b69e687bea09891aca71efb66fa919250c1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
L(N,D) = E + A/Nα + B/DβvsL(N,D,S) = A/Nα + B/Dβ + C(1-S)λ + d(1-S)δNγ
ModelEABαβλδγCd
L(N,D) (Eq 1)2.15838177346590.7100.372-----
L(N,D,S) [2]1.0788146600.58900.37200.20.20.709561.0788381475
", + "bbox": [ + 135, + 130, + 864, + 208 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/954ecdf74a27126739fe55ea72c130d722bf017ede0c1fb5950a4e172b17fdcd.jpg", + "table_caption": [ + "Table 12. Scaling laws for sparse native multimodal models." + ], + "table_footnote": [], + "table_body": "
AccuracyCIDEr
AVGVQAv2TextVQAOKVQAGQAVizWizCOCOTextCaps
4-E-top-140.055264.06814.28441.94861.4618.51662.20134.08
8-E-top-141.693465.68417.5542.90863.2619.06567.87739.63
8-E-top-242.854666.46619.16245.34463.9419.36165.98841.649
8-E-top-2 finegrained39.90462.7615.5841.8861.617.757.5235.42
", + "bbox": [ + 135, + 323, + 864, + 402 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 13. SFT results with different MoEs configurations.", + "bbox": [ + 310, + 405, + 683, + 419 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/925871664600fa369934dd20a2f34c9ad334d39d98206e080eb90db5737c852f.jpg", + "image_caption": [ + "Figure 26. Scaling laws for native multimodal models. From left to right: late-fusion (dense), early-fusion (dense) and early-fusion MoEs. The scaling exponents are very close for all models. However, MoEs leads to overall lower loss (smaller multiplicative constant) and takes longer to saturate." + ], + "image_footnote": [], + "bbox": [ + 94, + 521, + 906, + 795 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/a5eae89bad3ce176fd8a9fbfe3fb8e612accba59e9227da594fe7f8363a3bd57.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 91, + 141, + 354, + 333 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/92ad024f678fc1ec2ce39f984eafbe6ca1eaeb8ab89672d5e361993c173ed68b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 141, + 620, + 333 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/7445330c28293501507c7a1de3b802c291b7e7d4af6025b3396fca03b97b7816.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 166, + 890, + 332 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/fb070c10fca1f4655ac4f3b29724b2851fad05ebde2ebfed26e2f7526c0576ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 93, + 358, + 349, + 525 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/2298a5bd5a365cb44cab1b103fb156142e541e4de0c91847da088ebeb6d5772b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 358, + 620, + 523 + ], + 
"page_idx": 20 + }, + { + "type": "image", + "img_path": "images/170aa90031c86716a60fdb8bca8d1f9c6abc83012c624d6aa092ecd2d1457844.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 359, + 890, + 523 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/7229c0205223fd2165a40f38f790f64f89c1c7601693a992b739a3d796f80bf7.jpg", + "image_caption": [ + "Figure 27. Scaling laws for native multimodal models. From top to bottom: late-fusion (dense), early-fusion (dense) and early-fusion MoEs. From left to right: cross-entropy on the validation set of image-caption, interleaved and text-only data." + ], + "image_footnote": [], + "bbox": [ + 94, + 551, + 364, + 736 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/87a7df1c2a6cde40a771564ee46e3757cc1a708b33a759bba95feacc6350135a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 551, + 633, + 736 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/b7303f5247e7b6e588dfa05e005f69657763ba42c509db0cf3096a7e8f1b8e8d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 551, + 901, + 736 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/19bb36aad451a1cd656099f17e52e909bfb31aefe24539cfdfbdde715fde3f60.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
0.289B0.494B1B1.748B2.430B3.714B
0.275B0.464B0.932B1.627B2.280B3.354B
0.275B0.464B0.932B1.627B2.280B3.354B
", + "bbox": [ + 246, + 751, + 751, + 801 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 924, + 506, + 936 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/091e908750bec32dc612b24b77318d4392a00579c8de5564bd1f4aaeac00cffe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 91, + 116, + 349, + 281 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/f1d1d1a9ae09d693813d534ed3e52995b2b53c52b57e1dea64039a244f0d113b.jpg", + "image_caption": [ + "45-45-10", + "40-20-40" + ], + "image_footnote": [], + "bbox": [ + 364, + 116, + 620, + 281 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/58b8a90492785ea6eb48f59e69bdee2ab2ead0444b721b6d7b2d5a5ac625d566.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 117, + 888, + 280 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/cb4ae73517140700ebf19aea43a4b90376fd6e9529aca7bd298171d3300e1774.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 93, + 308, + 348, + 474 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/ea60f82ab55e1707f5708b66a23159e1bf846ea0bd5d17664d67a614b59b36da.jpg", + "image_caption": [ + "30-30-40" + ], + "image_footnote": [], + "bbox": [ + 366, + 309, + 619, + 473 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/fdb45bc5fa88e9e4889729e2053cff1cee23f3c95a045183987d941d85b99456.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 309, + 888, + 473 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/d1f9ebe2963b7671652b537ba3995aa73ec10a81a9fce26f78cb63d714e35caf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 501, + 348, + 667 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/3ae00f36a4a66bc014814831c85b16c3b2c789a86ea8281e5e74facae8c1ca14.jpg", + "image_caption": [ + "20-40-40" + ], + 
"image_footnote": [], + "bbox": [ + 366, + 500, + 619, + 665 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/af0628a1ce2945b362e0e67e09294f4be2d6a8597dc24e17cd4ce6c346c9a960.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 501, + 888, + 665 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/80f27c4c3ad4d2f64ee10c2d8bb2923fb6dd8bdd2496f979ee4083e210d57191.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 694, + 348, + 857 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/97f7d3f77f2b4f693354d3f8412b5275b071e1b828e08f12a883241d15b4b73f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 694, + 619, + 857 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/11479b86f8758143bec163b9b3f731a9a13ce7f8d8af1f77885578d6fc23d4d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 694, + 888, + 857 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/20f8b6dd661ab17387d6361040a2dda735af8ad5b613ab0bc7dd00e0c1dfca59.jpg", + "image_caption": [ + "Figure 28. Scaling laws for early-fusion native multimodal models. Our runs across different training mixtures (Image-caption-Interleaved-Text) and FLOPs. We visualize the final validation loss on 3 data types: HQITP (left), Obelics (middle) and DCLM (right)." 
+ ], + "image_footnote": [], + "bbox": [ + 251, + 864, + 754, + 886 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 924, + 508, + 935 + ], + "page_idx": 21 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_model.json b/data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_model.json new file mode 100644 index 0000000000000000000000000000000000000000..87b8c32ec6c4c22cadcbc6cf0abeac82e516f87f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_model.json @@ -0,0 +1,4556 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.275, + 0.131, + 0.723, + 0.153 + ], + "angle": 0, + "content": "Scaling Laws for Native Multimodal Models" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.182, + 0.292, + 0.198 + ], + "angle": 0, + "content": "Mustafa Shukor²" + }, + { + "type": "text", + "bbox": [ + 0.325, + 0.183, + 0.41, + 0.197 + ], + "angle": 0, + "content": "Enrico Fini" + }, + { + "type": "text", + "bbox": [ + 0.445, + 0.183, + 0.681, + 0.197 + ], + "angle": 0, + "content": "Victor Guilherme Turrisi da Costa1" + }, + { + "type": "text", + "bbox": [ + 0.716, + 0.183, + 0.822, + 0.197 + ], + "angle": 0, + "content": "Matthieu Cord²" + }, + { + "type": "text", + "bbox": [ + 0.348, + 0.207, + 0.468, + 0.223 + ], + "angle": 0, + "content": "Joshua Susskind" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.207, + 0.649, + 0.224 + ], + "angle": 0, + "content": "Alaaeldin El-Nouby" + }, + { + "type": "text", + "bbox": [ + 0.369, + 0.231, + 0.427, + 0.249 + ], + "angle": 0, + "content": "1Apple" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.231, + 0.629, + 0.249 + ], + "angle": 0, + "content": "\\(^{2}\\)Sorbonne University" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.283, + 0.327, + 0.299 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.316, + 0.485, + 0.635 + 
], + "angle": 0, + "content": "Building general-purpose models that can effectively perceive the world through multimodal signals has been a long-standing goal. Current approaches involve integrating separately pre-trained components, such as connecting vision encoders to LLMs and continuing multimodal training. While such approaches exhibit remarkable sample efficiency, it remains an open question whether such late-fusion architectures are inherently superior. In this work, we revisit the architectural design of native multimodal models (NMMs)-those trained from the ground up on all modalities—and conduct an extensive scaling laws study, spanning 457 trained models with different architectures and training mixtures. Our investigation reveals no inherent advantage to late-fusion architectures over early-fusion ones, which do not rely on image encoders or tokenizers. On the contrary, early-fusion exhibits stronger performance at lower parameter counts, is more efficient to train, and is easier to deploy. Motivated by the strong performance of the early-fusion architectures, we show that incorporating Mixture of Experts (MoEs) allows models to learn modality-specific weights, significantly benefiting performance." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.663, + 0.222, + 0.679 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.688, + 0.483, + 0.81 + ], + "angle": 0, + "content": "Multimodality provides a rich signal for perceiving and understanding the world. Advances in vision [23, 52, 55, 80] and language models [3, 19, 67] have enabled the development of powerful multimodal models that understand language, images, and audio. A common approach involves grafting separately pre-trained unimodal models, such as connecting a vision encoder to the input layer of an LLM [6, 9, 35, 43, 62, 64, 73, 78]." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Although this seems like a convenient approach, it remains an open question whether such late-fusion strategies are inherently optimal for understanding multimodal signals. Moreover, with abundant multimodal data available, initializing from unimodal pre-training is potentially detrimental, as it may introduce biases that prevent the model" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.285, + 0.875, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.692, + 0.396, + 0.731, + 0.407 + ], + "angle": 0, + "content": "FLOPs" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.409, + 0.875, + 0.518 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.695, + 0.522, + 0.736, + 0.532 + ], + "angle": 0, + "content": "FLOPs" + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.537, + 0.907, + 0.649 + ], + "angle": 0, + "content": "Figure 1. Scaling properties of Native Multimodal Models. Based on the scaling laws study in § 3.1, we observe: (1) early and late fusion models provide similar validation loss \\( L \\) when trained with the same compute budget \\( C \\) (FLOPs); (2) This performance is achieved via a different trade-off between parameters \\( N \\) and number of training tokens \\( D \\), where early-fusion models require fewer parameters. (3) Sparse early-fusion models achieve lower loss and require more training tokens for a given FLOP budget." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.653, + 0.907, + 0.775 + ], + "angle": 0, + "content": "from fully leveraging cross-modality co-dependancies. An additional challenge is scaling such systems; each component (e.g., vision encoder, LLM) has its own set of hyperparameters, pre-training data mixtures, and scaling properties with respect to the amount of data and compute applied. 
A more flexible architecture might allow the model to dynamically allocate its capacity across modalities, simplifying scaling efforts." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.909, + 0.903 + ], + "angle": 0, + "content": "In this work, we focus on the scaling properties of native multimodal models trained from the ground up on multimodal data. We first investigate whether the commonly adopted late-fusion architectures hold an intrinsic advantage by comparing them to early-fusion models, which process raw multimodal inputs without relying on dedicated vision encoders. We conduct scaling experiments on early and late fusion architectures, deriving scaling laws to pre" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.28, + 0.059, + 0.718 + ], + "angle": 270, + "content": "arXiv:2504.07951v4 [cs.CV] 9 Aug 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.287 + ], + "angle": 0, + "content": "dict their performance and compute-optimal configurations. Our findings indicate that late fusion offers no inherent advantage when trained from scratch. Instead, early-fusion models are more efficient and are easier to scale. Furthermore, we observe that native multimodal models follow scaling laws similar to those of LLMs [26], albeit with slight variations in scaling coefficients across modalities and datasets. Our results suggest that model parameters and training tokens should be scaled roughly equally for optimal performance. Moreover, we find that different multimodal training mixtures exhibit similar overall trends, indicating that our findings are likely to generalize to a broader range of settings." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.288, + 0.483, + 0.562 + ], + "angle": 0, + "content": "While our findings favor early fusion, multimodal data is inherently heterogeneous, suggesting that some degree of parameter specialization may still offer benefits. To investigate this, we explore leveraging Mixture of Experts (MoEs) [59], a technique that enables the model to dynamically allocate specialized parameters across modalities in a symmetric and parallel manner, in contrast to late-fusion models, which are asymmetric and process data sequentially. Training native multimodal models with MoEs results in significantly improved performance and therefore, faster convergence. Our scaling laws for MoEs suggest that scaling number of training tokens is more important than the number of active parameters. This unbalanced scaling is different from what is observed for dense models, due to the higher number of total parameters for sparse models. In addition, Our analysis reveals that experts tend to specialize in different modalities, with this specialization being particularly prominent in the early and last layers." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.573, + 0.32, + 0.589 + ], + "angle": 0, + "content": "1.1. Summary of our findings" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.596, + 0.383, + 0.611 + ], + "angle": 0, + "content": "Our findings can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.612, + 0.483, + 0.718 + ], + "angle": 0, + "content": "Native Early and Late fusion perform on par: Early fusion models trained from scratch perform on par with their late-fusion counterparts, with a slight advantage to early-fusion models for low compute budgets (Figure 3). Furthermore, our scaling laws study indicates that the compute-optimal models for early and late fusion perform similarly as the compute budget increases (Figure 1 Top)." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.718, + 0.483, + 0.779 + ], + "angle": 0, + "content": "NMMs scale similarly to LLMs: The scaling laws of native multimodal models follow similar laws as text-only LLMs with slightly varying scaling exponents depending on the target data type and training mixture (Table 2)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.483, + 0.825 + ], + "angle": 0, + "content": "Late-fusion requires more parameters: Compute-optimal late-fusion models require a higher parameters-to-data ratio when compared to early-fusion (Figure 1 bottom)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.826, + 0.483, + 0.902 + ], + "angle": 0, + "content": "Sparsity significantly benefits early-fusion NMMs: Sparse NMMs exhibit significant improvements compared to their dense counterparts at the same inference cost (Figure 10). Furthermore, they implicitly learn modality-specific weights when trained with sparsity (Figure 12). In" + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.09, + 0.905, + 0.213 + ], + "angle": 0, + "content": "
ExpressionDefinition
NNumber of parameters in the multimodal decoder. For MoEs this refers to the active parameters only.
DTotal number of multimodal tokens.
NvNumber of parameters in the vision-specific encoder. Only exists in late-fusion architectures.
DvNumber of vision-only tokens.
CTotal number of FLOPs, estimated as C = 6ND for early-fusion and C = 6(NvDv + ND) for late-fusion.
LValidation loss measured as the average over interleaved image-text, image-caption, and text-only data mixtures.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.215, + 0.905, + 0.228 + ], + "angle": 0, + "content": "Table 1. Definitions of the expressions used throughout the paper." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.234, + 0.905, + 0.279 + ], + "angle": 0, + "content": "addition, compute-optimal models rely more on scaling the number of training tokens than the number of active parameters as the compute-budget grows (Figure 1 Bottom)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.28, + 0.905, + 0.341 + ], + "angle": 0, + "content": "Modality-agnostic routing beats Modality-aware routing for Sparse NMMs: Training sparse mixture of experts with modality-agnostic routing consistently outperforms models with modality-aware routing (Figure 11)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.349, + 0.652, + 0.365 + ], + "angle": 0, + "content": "2. Preliminaries" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.374, + 0.634, + 0.389 + ], + "angle": 0, + "content": "2.1. Definitions" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.397, + 0.905, + 0.488 + ], + "angle": 0, + "content": "Native Multimodal Models (NMMs): Models that are trained from scratch on all modalities simultaneously without relying on pre-trained LLMs or vision encoders. Our focus is on the representative image and text modalities, where the model processes both text and images as input and generates text as output." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.488, + 0.906, + 0.593 + ], + "angle": 0, + "content": "Early fusion: Enabling multimodal interaction from the beginning, using almost no modality-specific parameters (e.g., except a linear layer to patchify images). Using a single transformer model, this approach processes raw multimodal input—tokenized text and continuous image patches—with no image discretization. In this paper, we refer to the main transformer as the decoder." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.595, + 0.906, + 0.655 + ], + "angle": 0, + "content": "Late fusion: Delaying the multimodal interaction to deeper layers, typically after separate unimodal components has processed that process each modality independently (e.g., a vision encoder connected to a decoder)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.656, + 0.905, + 0.701 + ], + "angle": 0, + "content": "Modality-agnostic routing: In sparse mixture-of-experts, modality-agnostic routing refers to relying on a learned router module that is trained jointly with the model." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.702, + 0.905, + 0.746 + ], + "angle": 0, + "content": "Modality-aware routing: Routing based on pre-defined rules such as routing based on the modality type (e.g., vision-tokens, token-tokens)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.758, + 0.652, + 0.773 + ], + "angle": 0, + "content": "2.2. Scaling Laws" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.906, + 0.901 + ], + "angle": 0, + "content": "We aim to understand the scaling properties of NMMs and how different architectural choices influence trade-offs. To this end, we analyze our models within the scaling laws framework proposed by Hoffmann et al. [26], Kaplan et al. [31]. We compute FLOPs based on the total number of parameters, using the approximation \\( C = 6ND \\), as adopted in prior work [2, 26]. However, we modify this estimation to suit our setup: for late-fusion models, FLOPs is computed" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.088, + 0.476, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.263, + 0.483, + 0.346 + ], + "angle": 0, + "content": "Figure 2. Scaling laws for early-fusion and late-fusion native multimodal models. 
Each point represents a model (300M to 3B parameters) trained on varying number of tokens (250M to 400B). We report the average cross-entropy loss on the validation sets of interleaved (Obelics), Image-caption (HQITP), and text-only data (DCLM)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.352, + 0.484, + 0.457 + ], + "angle": 0, + "content": "as \\(6(N_{v}D_{v} + ND)\\). We consider a setup where, given a compute budget \\(C\\), our goal is to predict the model's final performance, as well as determine the optimal number of parameters or number of training tokens. Consistent with prior studies on LLM scaling [26], we assume a power-law relationship between the final model loss and both model size \\((N)\\) and training tokens \\((D)\\):" + }, + { + "type": "equation", + "bbox": [ + 0.211, + 0.462, + 0.482, + 0.492 + ], + "angle": 0, + "content": "\\[\nL = E + \\frac {A}{N ^ {\\alpha}} + \\frac {B}{D ^ {\\beta}}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.492, + 0.484, + 0.629 + ], + "angle": 0, + "content": "Here, \\( E \\) represents the lowest achievable loss on the dataset, while \\( \\frac{A}{N^{\\alpha}} \\) captures the effect of increasing the number of parameters, where a larger model leads to lower loss, with the rate of improvement governed by \\( \\alpha \\). Similarly, \\( \\frac{B}{D^{\\beta}} \\) accounts for the benefits of a higher number of tokens, with \\( \\beta \\) determining the rate of improvement. Additionally, we assume a linear relationship between compute budget (FLOPs) and both \\( N \\) and \\( D \\) (\\( C \\propto ND \\)). This further leads to power-law relationships detailed in Appendix C.7." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.637, + 0.279, + 0.654 + ], + "angle": 0, + "content": "2.3. 
Experimental setup" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.66, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Our models are based on the autoregressive transformer architecture [71] with SwiGLU FFNs [58] and QK-Norm [17] following Li et al. [39]. In early-fusion models, image patches are linearly projected to match the text token dimension, while late-fusion follows the CLIP architecture [55]. We adopt causal attention for text tokens and bidirectional attention for image tokens, we found this to work better. Training is conducted on a mixture of public and private multimodal datasets, including DCLM [39], Obelics [34], DFN [21], COYO [11], and a private collection of High-Quality Image-Text Pairs (HQITP). Images are resized to \\(224 \\times 224\\) resolution with a \\(14 \\times 14\\) patch size. We use a context length of 1k for the multimodal sequences. For training efficiency, we train our models with bfloat16, Fully Sharded Data Parallel (FSDP) [82], activation checkpointing, and gradient accumulation. We also use se" + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.089, + 0.905, + 0.243 + ], + "angle": 0, + "content": "
L = E + A/Nα + B/DβN ∝ CaD ∝CbL ∝CcD ∝Nd
ModelDataEαβabcd
GPT3 [10]Text------0.048
Chinchilla [26]Text1.6930.3390.2850.460.54-
NMM (early-fusion)Text2.2220.30840.33750.52460.4774-0.04200.9085
Image-Caption1.5690.31110.33860.52030.4785-0.06100.9187
Interleaved1.9660.29710.3380.53150.4680-0.04590.8791
AVG1.9040.3010.3350.52620.473-0.04920.8987
NMM (late-fusion)AVG1.8910.29030.33830.63580.4619-0.04940.6732
Sparse NMM (early-fusion)AVG2.1580.7100.3720.3610.656-0.0471.797
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.245, + 0.905, + 0.3 + ], + "angle": 0, + "content": "Table 2. Scaling laws for native multimodal models. We report the scaling laws results for early and late fusion models. We fit the scaling laws for different target data types as well as their average loss (AVG)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.306, + 0.905, + 0.398 + ], + "angle": 0, + "content": "quence packing for the image captioning dataset to reduce the amount of padded tokens. Similar to previous works [2, 5, 26], we evaluate performance on held-out subsets of interleaved (Obelics), Image-caption (HQITP), and text-only data (DCLM). Further implementation details are provided in Appendix A." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.414, + 0.821, + 0.431 + ], + "angle": 0, + "content": "3. Scaling native multimodal models" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.44, + 0.906, + 0.531 + ], + "angle": 0, + "content": "In this section, we present a scaling laws study of native multimodal models, examining various architectural choices § 3.1, exploring different data mixtures § 3.2, analyzing the practical trade-offs between late and early fusion NMMs, and comparing the performance of native pretraining and continual pre-training of NMMs § 3.3." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.532, + 0.907, + 0.715 + ], + "angle": 0, + "content": "Setup. We train models ranging from 0.3B to 4B active parameters, scaling the width while keeping the depth constant. For smaller training token budgets, we reduce the warm-up phase to 1K steps while maintaining 5K steps for larger budgets. Following Hagele et al. [25], models are trained with a constant learning rate, followed by a cooldown phase using an inverse square root scheduler. The cool-down phase spans \\(20\\%\\) of the total steps spent at the constant learning rate. 
To estimate the scaling coefficients in Eq 1, we apply the L-BFGS algorithm [51] and Huber loss [28] (with \\(\\delta = 10^{-3}\\)), performing a grid search over initialization ranges." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.727, + 0.722, + 0.743 + ], + "angle": 0, + "content": "3.1. Scaling laws of NMMs" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.907, + 0.902 + ], + "angle": 0, + "content": "Scaling laws for early-fusion and late-fusion models. Figure 2 (left) presents the final loss averaged across interleaved, image-caption, and text datasets for early-fusion NMMs. The lowest-loss frontier follows a power law as a function of FLOPs. Fitting the power law yields the expression \\( L \\propto C^{-0.049} \\), indicating the rate of improvement with increasing compute. When analyzing the scaling laws per data type (e.g., image-caption, interleaved, text), we observe that the exponent varies (Table 2). For instance, the model achieves a higher rate of improvement for image-" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.09, + 0.351, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.09, + 0.61, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.09, + 0.88, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.141, + 0.251, + 0.856, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.282, + 0.908, + 0.325 + ], + "angle": 0, + "content": "Figure 3. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the number of model parameters and the number of training tokens. 
Overall, early fusion shows a slight advantage, especially at smaller model sizes, and the gap decreases when scaling the number of parameters \\( N \\)." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.342, + 0.483, + 0.373 + ], + "angle": 0, + "content": "caption data \\((L\\propto C^{-0.061})\\) when compared to interleaved documents \\((L\\propto C^{-0.046})\\)" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.38, + 0.484, + 0.606 + ], + "angle": 0, + "content": "To model the loss as a function of the number of training tokens \\( D \\) and model parameters \\( N \\), we fit the parametric function in Eq 1, obtaining scaling exponents \\( \\alpha = 0.301 \\) and \\( \\beta = 0.335 \\). These describe the rates of improvement when scaling the number of model parameters and training tokens, respectively. Assuming a linear relationship between compute, \\( N \\), and \\( D \\) (i.e., \\( C \\propto ND \\)), we derive the law relating model parameters to the compute budget (see Appendix C for details). Specifically, for a given compute budget \\( C \\), we compute the corresponding model size \\( N \\) at logarithmically spaced \\( D \\) values and determine \\( N_{opt} \\), the parameter count that minimizes loss. Repeating this across different FLOPs values produces a dataset of \\( (C, N_{opt}) \\), to which we fit a power law predicting the compute-optimal model size as a function of compute: \\( N^{*} \\propto C^{0.526} \\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.614, + 0.484, + 0.657 + ], + "angle": 0, + "content": "Similarly, we fit power laws to estimate the compute-optimal training dataset size as a function of compute and model size:" + }, + { + "type": "equation", + "bbox": [ + 0.171, + 0.659, + 0.405, + 0.679 + ], + "angle": 0, + "content": "\\[\nD _ {o p t} \\propto C ^ {0. 4 7 3}, D _ {o p t} \\propto N ^ {0. 
8 9 9}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.683, + 0.484, + 0.774 + ], + "angle": 0, + "content": "These relationships allow practitioners to determine the optimal model and dataset size given a fixed compute budget. When analyzing by data type, we find that interleaved data benefits more from larger models (\\(a = 0.532\\)) compared to image_caption data (\\(a = 0.520\\)), whereas the opposite trend holds for training tokens." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.484, + 0.902 + ], + "angle": 0, + "content": "We conduct a similar study on late-fusion models in Figure 2 (right) and observe comparable scaling behaviors. In particular, the loss scaling exponent \\((c = -0.0494)\\) is nearly identical to that of early fusion \\((c = -0.0492)\\). This trend is evident in Figure 3, where early fusion outperforms late fusion at smaller model scales, while both architectures converge to similar performance at larger model sizes. We also observe similar trends when varying late-fusion con" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.343, + 0.709, + 0.503 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.713, + 0.344, + 0.904, + 0.503 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.517, + 0.907, + 0.56 + ], + "angle": 0, + "content": "Figure 4. Early vs late: pretraining efficiency. Early-fusion is faster to train and consumes less memory. Models are trained on 16 H100 GPUs for 160k steps (300B tokens)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.565, + 0.907, + 0.596 + ], + "angle": 0, + "content": "figurations, such as using a smaller vision encoder with a larger text decoder Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.614, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Scaling laws of NMMs vs LLMs. 
Upon comparing the scaling law coefficients of our NMMs to those reported for text-only LLMs (e.g., GPT-3, Chinchilla), we find them to be within similar ranges. In particular, for predicting the loss as a function of compute, GPT-3 [10] follows \\(L \\propto C^{-0.048}\\), while our models follow \\(L \\propto C^{-0.049}\\), suggesting that the performance of NMMs adheres to similar scaling laws as LLMs. Similarly, our estimates of the \\(\\alpha\\) and \\(\\beta\\) parameters in Eq 1 (\\(\\alpha = 0.301\\), \\(\\beta = 0.335\\)) closely match those reported by Hoffmann et al. [26] (\\(\\alpha = 0.339\\), \\(\\beta = 0.285\\)). Likewise, our computed values of \\(a = 0.526\\) and \\(b = 0.473\\) align closely with \\(a = 0.46\\) and \\(b = 0.54\\) from [26], reinforcing the idea that, for native multimodal models, the number of training tokens and model parameters should be scaled proportionally. However, since the gap between \\(a\\) and \\(b\\) is smaller than in LLMs, this principle holds even more strongly for NMMs. Additionally, as \\(a = 0.526\\) is greater than \\(b = 0.473\\) in our case, the optimal model size for NMMs is larger than that of LLMs," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.092, + 0.898, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.271, + 0.908, + 0.3 + ], + "angle": 0, + "content": "Figure 5. Scaling laws with different training mixtures. Early-fusion models follow similar scaling trends when changing the pretraining mixtures. However, increasing the image captions leads to a higher scaling exponent norm (see Table 3)." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.315, + 0.487, + 0.384 + ], + "angle": 0, + "content": "
C-I-T (%)I/T ratioEαβabdc
145-45-101.191.9060.3010.3350.5270.4740.901-0.0492
240-20-400.651.9650.3280.3480.5180.4860.937-0.0486
330-30-400.591.8470.2530.3380.5720.4280.748-0.0463
420-40-400.491.8360.2590.3540.5820.4230.726-0.0488
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.385, + 0.484, + 0.414 + ], + "angle": 0, + "content": "Table 3. Scaling laws for different training mixtures. Early-fusion models. C-I-T refer to image-caption, interleaved and text" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.419, + 0.484, + 0.451 + ], + "angle": 0, + "content": "while the optimal number of training tokens is lower, given a fixed compute budget." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.474, + 0.484, + 0.71 + ], + "angle": 0, + "content": "Compute-optimal trade-offs for early vs. late fusion NMMs. While late- and early-fusion models reduce loss at similar rates with increasing FLOPs, we observe distinct trade-offs in their compute-optimal models. Specifically, \\(N_{opt}\\) is larger for late-fusion models, whereas \\(D_{opt}\\) is larger for early-fusion models. This indicates that, given a fixed compute budget, late-fusion models require a higher number of parameters, while early-fusion models benefit more from a higher number of training tokens. This trend is also reflected in the lower \\(\\frac{N_{opt}}{D_{opt}} \\propto C^{0.053}\\) for early fusion compared to \\(\\frac{N_{opt}}{D_{opt}} \\propto C^{0.076}\\) for late fusion. As shown in Figure 1 (bottom), when scaling FLOPs, the number of parameters of early fusion models becomes significantly lower, which is crucial for reducing inference costs and, consequently, lowering serving costs after deployment." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.735, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Early-fusion is more efficient to train. We compare the training efficiency of late- and early-fusion architectures. As shown in Figure 4, early-fusion models consume less memory and train faster under the same compute budget. 
This advantage becomes even more pronounced as compute increases, highlighting the superior training efficiency of early fusion while maintaining comparable performance to late fusion at scale. Notably, for the same FLOPs, late-fusion models have a higher parameter count and higher effective depth (i.e., additional vision encoder layers alongside decoder layers) compared to early-fusion models." + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.318, + 0.71, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.715, + 0.318, + 0.882, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.481, + 0.907, + 0.537 + ], + "angle": 0, + "content": "Figure 7. Early vs late fusion: changing the training mixture. We vary the training mixtures and plot the final training loss. Early fusion models attain a favorable performance when increasing the proportion of interleaved documents and text-only data." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.542, + 0.854, + 0.558 + ], + "angle": 0, + "content": "3.2. Scaling laws for different data mixtures" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.567, + 0.909, + 0.903 + ], + "angle": 0, + "content": "We investigate how variations in the training mixture affect the scaling laws of native multimodal models. To this end, we study four different mixtures that reflect common community practices [34, 41, 46, 81], with Image Caption-Interleaved-Text ratios of 45-45-10 (our default setup), 30-30-40, 40-20-40, and 20-40-40. For each mixture, we conduct a separate scaling study by training 76 different models, following our setup in § 3.1. Overall, Figure 5 shows that different mixtures follow similar scaling trends; however, the scaling coefficients vary depending on the mixture (Table 3). 
Interestingly, increasing the proportion of image-caption data (mixtures 1 and 2) leads to lower \\(a\\) and higher \\(b\\), whereas increasing the ratio of interleaved and text data (mixtures 3 and 4) have the opposite effect. Notably, image-caption data contains more image tokens than text tokens; therefore, increasing its proportion results in more image tokens, while increasing interleaved and text data increases text token counts. This suggests that, when image tokens are prevalent, training for longer decreases the loss faster than increasing the model size. We also found that for a fixed model size, increasing text-only and interleaved data ratio is in favor of early-fusion Figure 7." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.089, + 0.496, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.257, + 0.483, + 0.3 + ], + "angle": 0, + "content": "Figure 8. Early native vs initializing from LLMs: initializing from pre-trained models and scaling training tokens. We compare training with and without initializing from DCLM-1B." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.304, + 0.484, + 0.336 + ], + "angle": 0, + "content": "3.3. Native multimodal pre-training vs. continual training of LLMs" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.342, + 0.485, + 0.538 + ], + "angle": 0, + "content": "In this section, we compare training natively from scratch to continual training after initializing from a pre-trained LLM. We initialize the model from DCLM-1B [21] that is trained on more than 2T tokens. Figure 8 shows that native multimodal models can close the gap with initialized models when trained for longer. Specifically, on image captioning data, the model requires fewer than 100B multimodal tokens to reach comparable performance. 
However, on interleaved and text data, the model may need longer training—up to 1T tokens. Considering the cost of pre-training, these results suggest that training natively could be a more efficient approach for achieving the same performance on multimodal benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.557, + 0.409, + 0.574 + ], + "angle": 0, + "content": "4. Towards multimodal specialization" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.583, + 0.483, + 0.764 + ], + "angle": 0, + "content": "Previously, we demonstrated that early-fusion models achieve performance on par with late-fusion models under a fixed compute budget. However, multimodal data is inherently heterogeneous, and training a unified model to fit such diverse distributions may be suboptimal. Here, we argue for multimodal specialization within a unified architecture. Ideally, the model should implicitly adapt to each modality, for instance, by learning modality-specific weights or specialized experts. Mixture of Experts is a strong candidate for this approach, having demonstrated effectiveness in LLMs. In this section, we highlight the advantages of sparse early-fusion models over their dense counterparts." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.765, + 0.483, + 0.902 + ], + "angle": 0, + "content": "Setup. Our sparse models are based on the dropless-MoE implementation of Gale et al. [24], which eliminates token dropping during training caused by expert capacity constraints. We employ a top-\\(k\\) expert-choice routing mechanism, where each token selects its top-\\(k\\) experts among the \\(E\\) available experts. Specifically, we set \\(k = 1\\) and \\(E = 8\\), as we find this configuration to work effectively. Additionally, we incorporate an auxiliary load-balancing loss [59] with a weight of 0.01 to ensure a balanced expert utilization." 
+ }, + { + "type": "image", + "bbox": [ + 0.518, + 0.087, + 0.878, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.247, + 0.905, + 0.289 + ], + "angle": 0, + "content": "Figure 9. Scaling laws for sparse early-fusion NMMs. We report the final validation loss averaged across interleaved, image-captions and text data." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.294, + 0.905, + 0.326 + ], + "angle": 0, + "content": "Following Abnar et al. [2], we compute training FLOPs as \\(6ND\\), where \\(N\\) represents the number of active parameters." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.334, + 0.891, + 0.35 + ], + "angle": 0, + "content": "4.1. Sparse vs dense NMMs when scaling FLOPs" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.356, + 0.907, + 0.522 + ], + "angle": 0, + "content": "We compare sparse MoE models to their dense counterparts by training models with different numbers of active parameters and varying amounts of training tokens. Figure 10 shows that, under the same inference cost (or number of active parameters), MoEs significantly outperform dense models. Interestingly, this performance gap is more pronounced for smaller model sizes. This suggests that MoEs enable models to handle heterogeneous data more effectively and specialize in different modalities. However, as dense models become sufficiently large, the gap between the two architectures gradually closes." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.531, + 0.88, + 0.547 + ], + "angle": 0, + "content": "4.2. Scaling laws for sparse early-fusion models" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.553, + 0.907, + 0.841 + ], + "angle": 0, + "content": "We train different models (ranging from 300M to 3.4B active parameters) on varying amounts of tokens (ranging from 250M to 600B) and report the final loss in Figure 9. We fit a power law to the convex hull of the lowest loss as a function of compute (FLOPs). 
Interestingly, the exponent \\((-0.048)\\) is close to that of dense NMMs \\((-0.049)\\), indicating that both architectures scale similarly. However, the multiplicative constant is smaller for MoEs (27.086) compared to dense models (29.574), revealing lower loss. Additionally, MoEs require longer training to reach saturation compared to dense models (Appendix C for more details). We also predict the coefficients of Eq 1 by considering \\(N\\) as the number of active parameters. Table 2 shows significantly higher \\(\\alpha\\) compared to dense models. Interestingly, \\(b\\) is significantly higher than \\(a\\), revealing that the training tokens should be scaled at a higher rate than the number of parameters when training sparse NMMs. We also experiment with a scaling law that takes into account the sparsity [2] and reached similar conclusions in Appendix C.7." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.849, + 0.9, + 0.866 + ], + "angle": 0, + "content": "4.3. Modality-aware vs. Modality-agnostic routing" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.871, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Another alternative to MoEs is modality-aware routing, where multimodal tokens are assigned to experts based on" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.09, + 0.468, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.293, + 0.483, + 0.348 + ], + "angle": 0, + "content": "Figure 10. MoE vs Dense: scaling training FLOPs. We compare MoE and dense early-fusion models when scaling both the amount of training tokens and model sizes. MoEs beat dense models when matching the number of active parameters." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.354, + 0.484, + 0.46 + ], + "angle": 0, + "content": "their modalities, similar to previous works [7, 75]. 
We train models with distinct image and text experts in the form of FFNs, where image tokens are processed only by the image FFN and text tokens only by the text FFN. Compared to modality-aware routing, MoEs exhibit significantly better performance on both image-caption and interleaved data as presented in Figure 11." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.494, + 0.484, + 0.51 + ], + "angle": 0, + "content": "4.4. Emergence of expert specialization and sharing" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.524, + 0.484, + 0.901 + ], + "angle": 0, + "content": "We investigate multimodal specialization in MoE architectures. In Figure 13, we visualize the normalized number of text and image tokens assigned to each expert across layers. To quantify this specialization, we compute a specialization score, defined as the average, across all experts within a layer, of \\(1 - H(p)\\), where \\(H\\) is the binary entropy of each expert's text/image token distribution. We plot this specialization score in Figure 12. Higher specialization scores indicate a tendency for experts to focus on either text or image tokens, while lower scores indicate a shared behavior. These visualizations provide clear evidence of modality-specific experts, particularly in the early layers. Furthermore, the specialization score decreases as the number of layers increases, before rising again in the last layers. This suggests that early and final layers exhibit higher modality specialization compared to mid-layers. This behavior is intuitive, as middle layers are expected to hold higher-level features that may generalize across modalities, and consistent with findings in [61] that shows increasing alignment between modalities across layers. The emergence of both expert specialization and cross-modality sharing in our modality-agnostic MoE, suggests it may be a preferable approach compared to modality-aware sparsity. 
All data displayed here is from an early-fusion MoE model with 1B active parameters trained for 300B tokens." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.091, + 0.905, + 0.162 + ], + "angle": 0, + "content": "
AccuracyCIDEr
AVGVQAv2TextVQAOKVQAGQAVizWizCOCOTextCaps
Late-fusion46.869.425.850.165.822.870.750.9
Early-fusion47.669.328.152.165.423.272.053.8
Early-MoEs48.269.830.052.165.423.669.655.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.164, + 0.905, + 0.192 + ], + "angle": 0, + "content": "Table 4. Supervised finetuning on the LLaVA mixture. All models are native at 1.5B scale and pre-trained on 300B tokens." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.206, + 0.714, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.716, + 0.208, + 0.887, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.364, + 0.903, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.406, + 0.906, + 0.461 + ], + "angle": 0, + "content": "Figure 11. Modality-aware vs modality agnostic routing for sparse NMMs. We compare modality-agnostic routing with modality-aware routing when scaling both the amount of training tokens and model sizes." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.466, + 0.892, + 0.481 + ], + "angle": 0, + "content": "5. Evaluation on downstream tasks with SFT" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.493, + 0.907, + 0.69 + ], + "angle": 0, + "content": "Following previous work on scaling laws, we primarily rely on validation losses. However, we generally find that this evaluation correlates well with performance on downstream tasks. To validate this, we conduct a multimodal instruction tuning stage (SFT) on the LLaVA mixture [43] and report accuracy and CIDEr scores across several VQA and captioning tasks. Table 4 confirms the ranking of different model configurations. Specifically, early fusion outperforms late fusion, and MoEs outperform dense models. However, since the models are relatively small (1.5B scale), trained from scratch, and fine-tuned on a small dataset, the overall scores are lower than the current state of the art. Further implementation details can be found in Appendix A." 
+ }, + { + "type": "title", + "bbox": [ + 0.513, + 0.708, + 0.651, + 0.723 + ], + "angle": 0, + "content": "6. Related work" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Large multimodal models. A long-standing research goal has been to develop models capable of perceiving the world through multiple modalities, akin to human sensory experience. Recent progress in vision and language processing has shifted the research focus from smaller, task-specific models toward large, generalist models that can handle diverse inputs [29, 67]. Crucially, pre-trained vision and language backbones often require surprisingly little adaptation to enable effective cross-modal communication [32, 47, 62, 68, 69]. Simply integrating a vision encoder with either an encoder-decoder architecture [45, 48, 63, 72]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.09, + 0.476, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.249, + 0.485, + 0.348 + ], + "angle": 0, + "content": "Figure 12. MoE specialization score. Entropy-based image/text specialization score (as described in § 4.4) across layers for two data sources: HQITP and Obelics. HQITP has a more imbalanced image-to-text token distribution, resulting in generally higher specialization. Despite this difference, both data sources exhibit a similar trend: the specialization score decreases in the early layers before increasing again in the final layers." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.351, + 0.483, + 0.534 + ], + "angle": 0, + "content": "or a decoder-only LLM has yielded highly capable multimodal systems [1, 6, 9, 13, 16, 35, 43, 49, 64, 73, 78, 83]. 
This late-fusion approach, where modalities are processed separately before being combined, is now well-understood, with established best practices for training effective models [34, 41, 46, 81]. In contrast, early-fusion models [8, 18, 66], which combine modalities at an earlier stage, remain relatively unexplored, with only a limited number of publicly released models [8, 18]. Unlike [18, 66], our models utilize only a single linear layer and rely exclusively on a next-token prediction loss. Furthermore, we train our models from scratch on all modalities without image tokenization." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.536, + 0.483, + 0.718 + ], + "angle": 0, + "content": "Native Multimodal Models. We define native multimodal models as those trained from scratch on all modalities simultaneously [67] rather than adapting LLMs to accommodate additional modalities. Due to the high cost of training such models, they remain relatively underexplored, with most relying on late-fusion architectures [27, 79]. Some multimodal models trained from scratch [4, 66, 76] relax this constraint by utilizing pre-trained image tokenizers such as [20, 70] to convert images into discrete tokens, integrating them into the text vocabulary. This approach enables models to understand and generate text and images, facilitating a more seamless multimodal learning process." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.483, + 0.902 + ], + "angle": 0, + "content": "Scaling laws. Scaling law studies aim to predict how model performance scales with training compute. Early works [26, 31] found that LLM performance follows a power-law relationship with compute, enabling the compute-optimal estimation of the number of model parameters and training tokens at scale for a given budget. Similar research has extended these findings to sparse Mixture of Experts (MoE) models, considering factors such as sparsity, number of experts, and routing granularity [15, 33, 74]. 
Scaling laws have also been observed across various domains, including image models [23], video models [56], protein LLMs [14], and imitation learning [54]. However, few stud" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.09, + 0.649, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.654, + 0.09, + 0.775, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.78, + 0.09, + 0.898, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.176, + 0.907, + 0.232 + ], + "angle": 0, + "content": "Figure 13. MoE specialization frequency. Percentage of text and image tokens routed to each expert on interleaved data from Obelics. Experts are ordered for better visualization. The first layer shows the highest amount of unimodal experts." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.237, + 0.907, + 0.327 + ], + "angle": 0, + "content": "ies have investigated scaling laws for multimodal models. Notably, Aghajanyan et al. [5] examined multimodal models that tokenize modalities into discrete tokens and include multimodal generation. In contrast, we focus on studying early-fusion models that take raw multimodal inputs and are trained on interleaved multimodal data." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.334, + 0.909, + 0.47 + ], + "angle": 0, + "content": "Mixture of experts (MoEs). MoEs [59] scale model capacity efficiently by sparsely activating parameters, enabling large models with reduced per-sample compute. While widely studied in LLMs [22, 30, 36, 37, 42, 65, 77, 84], MoEs remain underexplored in multimodal settings. Prior work has examined contrastive models [50], late-fusion LLMs [38, 40], and modality-specific experts [7, 12, 60]. We focus on analyzing MoEs in early-fusion multimodal models." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.503, + 0.637, + 0.518 + ], + "angle": 0, + "content": "7. 
Limitations" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.535, + 0.909, + 0.673 + ], + "angle": 0, + "content": "Our study finds that scaling law coefficients are broadly consistent across training mixtures, though a broader exploration is needed to validate this observation. While validation loss scales predictably with compute, the extent to which this correlates with downstream performance remains unclear and warrants further investigation. The accuracy of scaling law predictions improves with higher FLOPs, but their extrapolation to extreme model sizes is still an open question (Appendix D for more details)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.703, + 0.634, + 0.719 + ], + "angle": 0, + "content": "8. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.909, + 0.903 + ], + "angle": 0, + "content": "We explore various strategies for compute-optimal pretraining of native multimodal models. We found the NMMs follow similar scaling laws to those of LLMs. Contrary to common belief, we find no inherent advantage in adopting late-fusion architectures over early-fusion ones. While both architectures exhibit similar scaling properties, early-fusion models are more efficient to train and outperform late-fusion models at lower compute budgets. Furthermore, we show that sparse architectures encourage modality-specific specialization, leading to performance improvements while maintaining the same inference cost." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.241, + 0.108 + ], + "angle": 0, + "content": "Acknowledgment" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.116, + 0.484, + 0.222 + ], + "angle": 0, + "content": "We thank Philipp Dufter, Samira Abnar, Xiujun Li, Zhe Gan, Alexander Toshev, Yinfei Yang, Dan Busbridge, and Jason Ramapuram for many fruitful discussions. 
We thank Denise Hui, and Samy Bengio for infra and compute support. Finally, we thank, Louis Bethune, Pierre Ablin, Marco Cuturi, and the MLR team at Apple for their support throughout the project." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.238, + 0.188, + 0.254 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.263, + 0.484, + 0.332 + ], + "angle": 0, + "content": "[1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.334, + 0.484, + 0.403 + ], + "angle": 0, + "content": "[2] Samira Abnar, Harshay Shah, Dan Busbridge, Alaaeldin Mohamed Elnouby Ali, Josh Susskind, and Vimal Thilak. Parameters vs flops: Scaling laws for optimal sparsity for mixture-of-experts language models. arXiv preprint arXiv:2501.12370, 2025. 2, 3, 6, 18, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.406, + 0.484, + 0.473 + ], + "angle": 0, + "content": "[3] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.476, + 0.484, + 0.545 + ], + "angle": 0, + "content": "[4] Armen Aghajanyan, Bernie Huang, Candace Ross, Vladimir Karpukhin, Hu Xu, Naman Goyal, Dmytro Okhonko, Mandar Joshi, Gargi Ghosh, Mike Lewis, et al. Cm3: A causal masked multimodal model of the internet. arXiv preprint arXiv:2201.07520, 2022. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.548, + 0.484, + 0.63 + ], + "angle": 0, + "content": "[5] Armen Aghajanyan, Lili Yu, Alexis Conneau, Wei-Ning Hsu, Karen Hambardzumyan, Susan Zhang, Stephen Roller, Naman Goyal, Omer Levy, and Luke Zettlemoyer. Scaling laws for generative mixed-modal language models. In International Conference on Machine Learning, pages 265-279. PMLR, 2023. 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.633, + 0.484, + 0.715 + ], + "angle": 0, + "content": "[6] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.717, + 0.484, + 0.786 + ], + "angle": 0, + "content": "[7] Hangbo Bao, Wenhui Wang, Li Dong, Qiang Liu, Owais Khan Mohammed, Kriti Aggarwal, Subhojit Som, and Furu Wei. Vlmo: Unified vision-language pretraining with mixture-of-modality-experts. arXiv preprint arXiv:2111.02358, 2021. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.789, + 0.484, + 0.83 + ], + "angle": 0, + "content": "[8] Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, and Săgnak Taşirlar. Introducing our multimodal models, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.832, + 0.484, + 0.901 + ], + "angle": 0, + "content": "[9] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024. 
1, 8" + }, + { + "type": "list", + "bbox": [ + 0.101, + 0.263, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.162 + ], + "angle": 0, + "content": "[10] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.164, + 0.905, + 0.218 + ], + "angle": 0, + "content": "[11] Minwoo Byeon, Beomhee Park, Haecheon Kim, Sungjun Lee, Woonhyuk Baek, and Saehoon Kim. Coyo-700m: Image-text pair dataset. https://github.com/kakaobrain/coyo-dataset, 2022.3, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.219, + 0.905, + 0.288 + ], + "angle": 0, + "content": "[12] Junyi Chen, Longteng Guo, Jia Sun, Shuai Shao, Zehuan Yuan, Liang Lin, and Dongyu Zhang. Eve: Efficient vision-language pre-training with masked prediction and modality-aware moe. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1110-1119, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.289, + 0.905, + 0.37 + ], + "angle": 0, + "content": "[13] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.372, + 0.905, + 0.412 + ], + "angle": 0, + "content": "[14] Xingyi Cheng, Bo Chen, Pan Li, Jing Gong, Jie Tang, and Le Song. Training compute-optimal protein language models. bioRxiv, 2024. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.414, + 0.905, + 0.496 + ], + "angle": 0, + "content": "[15] Aidan Clark, Diego de Las Casas, Aurelia Guy, Arthur Mensch, Michela Paganini, Jordan Hoffmann, Bogdan Damoc, Blake Hechtman, Trevor Cai, Sebastian Borgeaud, et al. Unified scaling laws for routed language models. In International conference on machine learning, pages 4057-4086. PMLR, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.498, + 0.905, + 0.566 + ], + "angle": 0, + "content": "[16] Wenliang Dai, Nayeon Lee, Boxin Wang, Zhuolin Yang, Zihan Liu, Jon Barker, Tuomas Rintamaki, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. NvIm: Open frontier-class multimodal llms. arXiv preprint arXiv:2409.11402, 2024.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.568, + 0.905, + 0.65 + ], + "angle": 0, + "content": "[17] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdul-mohsin, et al. Scaling vision transformers to 22 billion parameters. In International Conference on Machine Learning, pages 7480-7512. PMLR, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.651, + 0.905, + 0.704 + ], + "angle": 0, + "content": "[18] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. arXiv preprint arXiv:2406.11832, 2024.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.707, + 0.905, + 0.774 + ], + "angle": 0, + "content": "[19] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.776, + 0.905, + 0.844 + ], + "angle": 0, + "content": "[20] Patrick Esser, Robin Rombach, and Bjorn Ommer. 
Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12873-12883, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[21] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. arXiv preprint arXiv:2309.17425, 2023. 3, 6, 13" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.147 + ], + "angle": 0, + "content": "[22] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research, 23(120):1-39, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.482, + 0.245 + ], + "angle": 0, + "content": "[23] Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Béthune, Zhe Gan, Alexander T Toshev, Marcin Eichner, Moin Nabi, Yinfei Yang, Joshua M. Susskind, and Alaaeldin El-Nouby. Multimodal autoregressive pre-training of large vision encoders, 2024. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.249, + 0.482, + 0.303 + ], + "angle": 0, + "content": "[24] Trevor Gale, Deepak Narayanan, Cliff Young, and Matei Zaharia. Megablocks: Efficient sparse training with mixture-of-experts. Proceedings of Machine Learning and Systems, 5:288-304, 2023. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.306, + 0.482, + 0.36 + ], + "angle": 0, + "content": "[25] Alexander Hagele, Elie Bakouch, Atli Kosson, Loubna Ben Allal, Leandro Von Werra, and Martin Jaggi. Scaling laws and compute-optimal training beyond fixed training durations. arXiv preprint arXiv:2405.18392, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.363, + 0.482, + 0.459 + ], + "angle": 0, + "content": "[26] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. In Proceedings of the 36th International Conference on Neural Information Processing Systems, pages 30016-30030, 2022. 2, 3, 4, 8, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.462, + 0.482, + 0.543 + ], + "angle": 0, + "content": "[27] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Barun Patra, et al. Language is not all you need: Aligning perception with language models. Advances in Neural Information Processing Systems, 36:72096-72109, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.546, + 0.482, + 0.574 + ], + "angle": 0, + "content": "[28] Peter J. Huber. Robust Estimation of a Location Parameter, pages 492-518. Springer New York, New York, NY, 1992. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.576, + 0.482, + 0.63 + ], + "angle": 0, + "content": "[29] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.633, + 0.482, + 0.701 + ], + "angle": 0, + "content": "[30] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.704, + 0.482, + 0.771 + ], + "angle": 0, + "content": "[31] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 2, 8, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.775, + 0.482, + 0.83 + ], + "angle": 0, + "content": "[32] Jing Yu Koh, Ruslan Salakhutdinov, and Daniel Fried. Grounding language models to images for multimodal inputs and outputs. In International Conference on Machine Learning, pages 17283-17300. PMLR, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.832, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[33] Jakub Krajewski, Jan Ludziejewski, Kamil Adamczewski, Maciej Pioro, Michal Krutul, Szymon Antoniak, Kamil Ciebiera, Krystian Król, Tomasz Odrzygoźdź, Piotr Sankowski, et al. Scaling laws for fine-grained mixture of experts. arXiv preprint arXiv:2402.07871, 2024. 8, 18" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.176 + ], + "angle": 0, + "content": "[34] Hugo Laurencon, Lucile Saulnier, Léo Tronchon, Stas Bekman, Amanpreet Singh, Anton Lozhkov, Thomas Wang, Siddharth Karamcheti, Alexander Rush, Douwe Kiela, et al. Obelics: An open web-scale filtered dataset of interleaved image-text documents. Advances in Neural Information Processing Systems, 36, 2024. 
3, 5, 8, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.179, + 0.905, + 0.219 + ], + "angle": 0, + "content": "[35] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models? arXiv preprint arXiv:2405.02246, 2024. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.221, + 0.905, + 0.29 + ], + "angle": 0, + "content": "[36] Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan First, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. Gshard: Scaling giant models with conditional computation and automatic sharding. arXiv preprint arXiv:2006.16668, 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.292, + 0.905, + 0.347 + ], + "angle": 0, + "content": "[37] Mike Lewis, Shruti Bhosale, Tim Dettmers, Naman Goyal, and Luke Zettlemoyer. Base layers: Simplifying training of large, sparse models. In International Conference on Machine Learning, pages 6265-6274. PMLR, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.349, + 0.905, + 0.404 + ], + "angle": 0, + "content": "[38] Dongxu Li, Yudong Liu, Haoning Wu, Yue Wang, Zhiqi Shen, Bowen Qu, Xinyao Niu, Guoyin Wang, Bei Chen, and Junnan Li. Aria: An open multimodal native mixture-of-experts model. arXiv preprint arXiv:2410.05993, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.406, + 0.905, + 0.474 + ], + "angle": 0, + "content": "[39] Jeffrey Li, Alex Fang, Georgios Smyrnis, Maor Ivgi, Matt Jordan, Samir Gadre, Hritik Bansal, Etash Guha, Sedrick Keh, Kushal Arora, et al. Datacomp-lm: In search of the next generation of training sets for language models. arXiv preprint arXiv:2406.11794, 2024. 3, 13, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.476, + 0.905, + 0.532 + ], + "angle": 0, + "content": "[40] Bin Lin, Zhenyu Tang, Yang Ye, Jiaxi Cui, Bin Zhu, Peng Jin, Junwu Zhang, Munan Ning, and Li Yuan. Moe-llava: Mixture of experts for large vision-language models. 
arXiv preprint arXiv:2401.15947, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.533, + 0.905, + 0.601 + ], + "angle": 0, + "content": "[41] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.604, + 0.905, + 0.659 + ], + "angle": 0, + "content": "[42] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.661, + 0.905, + 0.716 + ], + "angle": 0, + "content": "[43] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 1, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.718, + 0.905, + 0.746 + ], + "angle": 0, + "content": "[44] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.747, + 0.905, + 0.815 + ], + "angle": 0, + "content": "[45] Jiasen Lu, Christopher Clark, Rowan Zellers, Roozbeh Mottaghi, and Aniruddha Kembhavi. Unified-io: A unified model for vision, language, and multi-modal tasks. In The Eleventh International Conference on Learning Representations, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.818, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[46] Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Duffer, Dhruti Shah, Xianzhi Du, Futang Peng, Anton Belyi, et al. Mm1: methods, analysis and insights from multimodal llm pre-training. 
In European Conference on Computer Vision, pages 304–323. Springer, 2025. 5, 8, 13" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.926, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.146 + ], + "angle": 0, + "content": "[47] Jack Merullo, Louis Castricato, Carsten Eickhoff, and Ellie Pavlick. Linearly mapping from image to text space. In *The Eleventh International Conference on Learning Representations*, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.482, + 0.218 + ], + "angle": 0, + "content": "[48] David Mizrahi, Roman Bachmann, Oguzhan Kar, Teresa Yeo, Mingfei Gao, Afshin Dehghan, and Amir Zamir. 4m: Massively multimodal masked modeling. Advances in Neural Information Processing Systems, 36:58363-58408, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.221, + 0.482, + 0.317 + ], + "angle": 0, + "content": "[49] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Tushar Nagarajan, Matt Smith, Shashank Jain, Chun-Fu Yeh, Prakash Murugesan, Peyman Heidari, Yue Liu, et al. Anymal: An efficient and scalable any-modality augmented language model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1314-1332, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.319, + 0.482, + 0.387 + ], + "angle": 0, + "content": "[50] Basil Mustafa, Carlos Riquelme, Joan Puigcerver, Rodolphe Jenatton, and Neil Houlsby. Multimodal contrastive learning with limoe: the language-image mixture of experts. Advances in Neural Information Processing Systems, 35:9564-9576, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.39, + 0.482, + 0.43 + ], + "angle": 0, + "content": "[51] Jorge Nocedal. Updating quasi newton matrices with limited storage. 
Mathematics of Computation, 35(151):951-958, 1980. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.433, + 0.482, + 0.502 + ], + "angle": 0, + "content": "[52] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.504, + 0.482, + 0.544 + ], + "angle": 0, + "content": "[53] Tim Pearce and Jinyeop Song. Reconciling kaplan and chinchilla scaling laws. arXiv preprint arXiv:2406.12907, 2024. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.547, + 0.482, + 0.602 + ], + "angle": 0, + "content": "[54] Tim Pearce, Tabish Rashid, Dave Bignell, Raluca Georgescu, Sam Devlin, and Katja Hofmann. Scaling laws for pre-training agents and world models. arXiv preprint arXiv:2411.04434, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.604, + 0.482, + 0.686 + ], + "angle": 0, + "content": "[55] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 1, 3, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.688, + 0.482, + 0.757 + ], + "angle": 0, + "content": "[56] Jathushan Rajasegaran, Ilija Radosavovic, Rahul Ravishankar, Yossi Gandelsman, Christoph Feichtenhofer, and Jitendra Malik. An empirical study of autoregressive pretraining from videos. arXiv preprint arXiv:2501.05453, 2025.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.759, + 0.482, + 0.829 + ], + "angle": 0, + "content": "[57] Kanchana Ranasinghe, Brandon McKinzie, Sachin Ravi, Yinfei Yang, Alexander Toshev, and Jonathon Shlens. 
Perceptual grouping in contrastive vision-language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5571-5584, 2023. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.831, + 0.482, + 0.857 + ], + "angle": 0, + "content": "[58] Noam Shazeer. Glu variants improve transformer. arXiv preprint arXiv:2002.05202, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.859, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[59] Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.119 + ], + "angle": 0, + "content": "of-experts layer. arXiv preprint arXiv:1701.06538, 2017. 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.121, + 0.905, + 0.188 + ], + "angle": 0, + "content": "[60] Sheng Shen, Zhewei Yao, Chunyuan Li, Trevor Darrell, Kurt Keutzer, and Yuxiong He. Scaling vision-language models with sparse mixture of experts. In The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.19, + 0.905, + 0.245 + ], + "angle": 0, + "content": "[61] Mustafa Shukor and Matthieu Cord. Implicit multimodal alignment: On the generalization of frozen llms to multimodal inputs. Advances in Neural Information Processing Systems, 37:130848-130886, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.247, + 0.905, + 0.302 + ], + "angle": 0, + "content": "[62] Mustafa Shukor, Corentin Dancette, and Matthieu Cord. eplalm: Efficient perceptual augmentation of language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22056-22069, 2023. 
1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.304, + 0.905, + 0.357 + ], + "angle": 0, + "content": "[63] Mustafa Shukor, Corentin Dancette, Alexandre Rame, and Matthieu Cord. Unival: Unified model for image, video, audio and language tasks. Transactions on Machine Learning Research Journal, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.359, + 0.905, + 0.44 + ], + "angle": 0, + "content": "[64] Mustafa Shukor, Dana Aubakirova, Francesco Capuano, Pepijn Kooijmans, Steven Palma, Adil Zoutine, Michel Ar-actingi, Caroline Pascal, Martino Russi, Andres Marafioti, et al. Smolvla: A vision-language-action model for affordable and efficient robotics. arXiv preprint arXiv:2506.01844, 2025. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.442, + 0.905, + 0.51 + ], + "angle": 0, + "content": "[65] Xingwu Sun, Yanfeng Chen, Yiqing Huang, Ruobing Xie, Jiaqi Zhu, Kai Zhang, Shuaipeng Li, Zhen Yang, Jonny Han, Xiaobo Shu, et al. Hunyuan-large: An open-source moe model with 52 billion activated parameters by tencent. arXiv preprint arXiv:2411.02265, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.511, + 0.905, + 0.55 + ], + "angle": 0, + "content": "[66] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.553, + 0.905, + 0.621 + ], + "angle": 0, + "content": "[67] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.623, + 0.905, + 0.678 + ], + "angle": 0, + "content": "[68] Maria Tsimpoukelli, Jacob L Menick, Serkan Cabi, SM Eslami, Oriol Vinyals, and Felix Hill. Multimodal few-shot learning with frozen language models. 
Advances in Neural Information Processing Systems, 34:200-212, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.679, + 0.905, + 0.733 + ], + "angle": 0, + "content": "[69] Théophane Vallaeys, Mustafa Shukor, Matthieu Cord, and Jakob Verbeek. Improved baselines for data-efficient perceptual augmentation of llms. arXiv preprint arXiv:2403.13499, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.735, + 0.905, + 0.788 + ], + "angle": 0, + "content": "[70] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2017. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.79, + 0.905, + 0.817 + ], + "angle": 0, + "content": "[71] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.818, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[72] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International conference on machine learning, pages 23318-23340. PMLR, 2022. 7" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.161 + ], + "angle": 0, + "content": "[73] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 
1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.162, + 0.482, + 0.259 + ], + "angle": 0, + "content": "[74] Siqi Wang, Zhengyu Chen, Bei Li, Keqing He, Min Zhang, and Jingang Wang. Scaling laws across model architectures: A comparative analysis of dense and MoE models in large language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 5583-5595, Miami, Florida, USA, 2024. Association for Computational Linguistics. 8, 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.26, + 0.482, + 0.328 + ], + "angle": 0, + "content": "[75] Wenhui Wang, Hangbo Bao, Li Dong, Johan Bjorck, Zhiliang Peng, Qiang Liu, Kriti Aggarwal, Owais Khan Mohammed, Saksham Singhal, Subhojit Som, et al. Image as a foreign language: Beit pretraining for all vision and vision-language tasks. arXiv preprint arXiv:2208.10442, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.33, + 0.482, + 0.385 + ], + "angle": 0, + "content": "[76] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.386, + 0.482, + 0.454 + ], + "angle": 0, + "content": "[77] Tianwen Wei, Bo Zhu, Liang Zhao, Cheng Cheng, Biye Li, Weiwei Lu, Peng Cheng, Jianhao Zhang, Xiaoyu Zhang, Liang Zeng, et al. Skywork-moe: A deep dive into training techniques for mixture-of-experts language models. arXiv preprint arXiv:2406.06563, 2024.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.455, + 0.482, + 0.523 + ], + "angle": 0, + "content": "[78] Le Xue, Manli Shu, Anas Awadalla, Jun Wang, An Yan, Senthil Purushwalkam, Honglu Zhou, Viraj Prabhu, Yutong Dai, Michael S Ryoo, et al. xgen-mm (blip-3): A family of open large multimodal models. arXiv preprint arXiv:2408.08872, 2024. 
1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.525, + 0.482, + 0.579 + ], + "angle": 0, + "content": "[79] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.581, + 0.482, + 0.635 + ], + "angle": 0, + "content": "[80] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.637, + 0.482, + 0.704 + ], + "angle": 0, + "content": "[81] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Duffer, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1. 5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.707, + 0.482, + 0.773 + ], + "angle": 0, + "content": "[82] Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.776, + 0.482, + 0.843 + ], + "angle": 0, + "content": "[83] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. In The Twelfth International Conference on Learning Representations, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.845, + 0.482, + 0.9 + ], + "angle": 0, + "content": "[84] Barret Zoph, Irwan Bello, Sameer Kumar, Nan Du, Yanping Huang, Jeff Dean, Noam Shazeer, and William Fedus. 
St-moe: Designing stable and transferable sparse expert models. arXiv preprint arXiv:2202.08906, 2022. 8" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.275, + 0.087, + 0.723, + 0.14 + ], + "angle": 0, + "content": "Scaling Laws for Native Multimodal Models Supplementary Material" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.157, + 0.441, + 0.174 + ], + "angle": 0, + "content": "This supplementary material is organized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.196, + 0.483, + 0.225 + ], + "angle": 0, + "content": "- Appendix A: contains the implementation details and the hyperparameters used to train our models." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.226, + 0.483, + 0.255 + ], + "angle": 0, + "content": "- Appendix B: contains detailed comparison between early and late fusion models." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.257, + 0.483, + 0.285 + ], + "angle": 0, + "content": "- Appendix C: contains more details about scaling laws derivation, evaluation and additional results." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.287, + 0.483, + 0.315 + ], + "angle": 0, + "content": "- Appendix D: contains discussion about the paper limitations." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.317, + 0.483, + 0.347 + ], + "angle": 0, + "content": "- Appendix E: contains more results about MoEs and modality specialization." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.196, + 0.483, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.396, + 0.284, + 0.414 + ], + "angle": 0, + "content": "A. 
Experimental setup" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.433, + 0.484, + 0.9 + ], + "angle": 0, + "content": "In Table 6, we show the pre-training hyperparameters for different model configurations used to derive the scaling laws. The number of parameters ranges from 275M to 3.7B, with model width increasing accordingly, while the depth remains fixed at 24 layers. Learning rates vary by model size, decreasing as the model scales up. Based on empirical experiments and estimates similar to [46], we found these values to be effective in our setup. Training is optimized using a fully decoupled AdamW optimizer with momentum values \\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.95\\), and a weight decay of \\(1\\mathrm{e} - 4\\). The batch size is set to 2k samples, which account for 2M tokens, given 1k context length. Gradient clipping is set to 1.0, with a maximum warmup duration of 5k iterations, adjusted for shorter training runs: 1k and 2.5k warmup steps for models trained between 1k-4k and 5k-15k steps, respectively. For MoEs, we found that longer warmup is significantly better, so we adopt a 2.5k warmup for all runs under 20k steps. We use a constant learning rate schedule with cooldown during the final \\(20\\%\\) of training, gradually reducing to zero following an inverse square root schedule. For vision processing, image inputs are divided into (14, 14) patches, with augmentations including Random Resized Crop (resizing images to 224px with a scale range of [0.4, 1.0]) and Random Horizontal Flip with a probability of 0.5. We train our models on mixture of interleaved, image captions and text only data Table 5. For late fusion models, we found that using smaller learning rate for the vision encoder significantly boost the performance Table 8, and when both the encoder and decoder are initialized (Appendix B.7) we found that freezing the vision encoder works best Table 7." 
+ }, + { + "type": "table", + "bbox": [ + 0.518, + 0.155, + 0.905, + 0.226 + ], + "angle": 0, + "content": "
Data typedataset#samplessampling prob.
DFN [21]2B27%
Image-CaptionCOYO [11]600M11.25%
HQITP[57]400M6.75%
InterleavedObelics [34]141M Docs45%
TextDCLM [39]6.6T Toks10%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.228, + 0.905, + 0.27 + ], + "angle": 0, + "content": "Table 5. Pre-training data mixture. Unless otherwise specified, the training mixture contains \\(45\\%\\), \\(45\\%\\) and \\(10\\%\\) of image captions, interleaved documents and text-only data." + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.293, + 0.905, + 0.665 + ], + "angle": 0, + "content": "
Early-fusion
Params275M468M932M1.63B2.28B3.35B
width80010881632220826243232
depth24
Learning rate1.5e-31.5e-35e-44.2e-44e-43.5e-4
Late-fusion
Params289M494M1B1.75B2.43B3.7B
vision encoder width384512768102411841536
vision encoder depth24
width76810241536204824643072
depth24
Learning rate1.5e-31.5e-35e-44.2e-43.8e-43.3e-4
Early-fusion MoEs
Active Params275M468M932M1.63B2.28B3.35B
width80010881632220826243232
depth24
Learning rate1.5e-31.5e-35 e-44.2e-44e-43.5e-4
Training tokens2.5B-600B
OptimizerFully decoupled AdamW [44]
Optimizer Momentumβ1=0.9, β2=0.95
Minimum Learning rate0
Weight decay1e-4
Batch size2k
Patch size(14, 14)
Gradient clipping1.0
MAximum Warmup iterations5k
Augmentations: \nRandomResizedCrop \nsize224px
scale[0.4, 1.0]
RandomHorizontalFlipp=0.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.512, + 0.678, + 0.908, + 0.72 + ], + "angle": 0, + "content": "Table 6. Pre-training hyperparameters We detail the hyperparameters used for pre-training different model configurations to derive scaling laws." + }, + { + "type": "table", + "bbox": [ + 0.538, + 0.751, + 0.887, + 0.85 + ], + "angle": 0, + "content": "
Vision encoder\nlr schedulerInterleaved\n(CE)Image-Caption\n(CE)Text\n(CE)AVG\n(CE)AVG (SFT)\n(Acc)
12.5212.152.8672.51343.49
0.12.5022.0662.8622.47752.27
0.012.5022.0662.8592.47653.76
0.0012.5132.0662.8572.479-
0 (frozen)2.5042.0612.8562.47454.14
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.852, + 0.907, + 0.892 + ], + "angle": 0, + "content": "Table 7. Vision encoder scalar. Freezing the vision encoder works best when initializing late-fusion models with pre-trained models." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.09, + 0.365, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.369, + 0.09, + 0.619, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.091, + 0.889, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.239, + 0.249, + 0.761, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.281, + 0.908, + 0.311 + ], + "angle": 0, + "content": "Figure 14. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the model size and the number of training tokens. The gap decreases mainly due to scaling models size." + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.328, + 0.368, + 0.479 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.329, + 0.625, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.637, + 0.348, + 0.895, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.507, + 0.908, + 0.551 + ], + "angle": 0, + "content": "Figure 15. Early vs late fusion: changing the training mixture. We vary the training mixtures and plot the final training loss. Early fusion models become better when increasing the proportion of interleaved documents. Early and late fusion has 1.63B and 1.75B parameters respectively." 
+ }, + { + "type": "table", + "bbox": [ + 0.114, + 0.577, + 0.465, + 0.649 + ], + "angle": 0, + "content": "
Vision encoder lrScalerInterleaved (CE)Image-Caption (CE)Text (CE)AVG (CE)AVG (SFT) (Acc)
0.12.6742.2193.0722.65534.84
0.012.6722.1973.0712.64738.77
0.0012.6742.2183.0732.65538.46
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.65, + 0.483, + 0.692 + ], + "angle": 0, + "content": "Table 8. Vision encoder scalar. Reducing the learning rate for the vision encoder is better when training late-fusion models from scratch." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.696, + 0.285, + 0.714 + ], + "angle": 0, + "content": "B. Late vs early fusion" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.721, + 0.483, + 0.751 + ], + "angle": 0, + "content": "This section provides additional comparison between early and late fusion models." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.758, + 0.248, + 0.775 + ], + "angle": 0, + "content": "B.1. Scaling FLOPs" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Figure 14 compares early-fusion and late-fusion models when scaling FLOPs. Specifically, for each model size, we train multiple models using different amounts of training tokens. The performance gap between the two approaches mainly decreases due to increasing model sizes rather than increasing the number of training tokens. Despite the decreasing gap, across all the models that we train, early-fusion consistently outperform late-fusion." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.578, + 0.827, + 0.594 + ], + "angle": 0, + "content": "B.2. Changing the training data mixture" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.6, + 0.907, + 0.797 + ], + "angle": 0, + "content": "We analyze how the performance gap between early and late fusion models changes with variations in the training data mixture. As shown in Figure 16 and Figure 15, when fixing the model size, increasing the ratio of text and interleaved data favors early fusion. Interestingly, the gap remains largely unchanged for other data types. We also observe interference effects between different data types. 
Specifically, increasing the amount of interleaved data negatively impacts performance on image captions and vice versa. Additionally, increasing the proportion of text-only data slightly improves interleaved performance but increases loss on image captions. Overall, we find that text-only and interleaved data are correlated across different setups." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.803, + 0.907, + 0.833 + ], + "angle": 0, + "content": "B.3. Scaling image resolution is in favor of early-fusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.909, + 0.903 + ], + "angle": 0, + "content": "We examine how both architectures perform with varying image resolution. We fix the number of model parameters to 1.63B and 1.75B for early and late fusion respectively. All models are trained for 100K steps or 200B tokens. Since" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.113, + 0.357, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.113, + 0.627, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.64, + 0.112, + 0.89, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.272, + 0.908, + 0.314 + ], + "angle": 0, + "content": "Figure 16. Early vs late fusion: changing the amount of text-only data in the training mixture (isoFLOPs). We vary the ratio of text-only data and plot the final training loss. The gap increases with the text data ratio in favor of early fusion model. Early fusion has 1.63B parameters and late fusion 1.75B parameters." 
+ }, + { + "type": "image", + "bbox": [ + 0.096, + 0.342, + 0.29, + 0.488 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.342, + 0.471, + 0.488 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.499, + 0.483, + 0.609 + ], + "angle": 0, + "content": "Figure 17. Early vs late fusion: training with different image resolutions (isoFLOPs). For the same training FLOPs we vary the image resolution (and thus the number of image tokens) during training and report the final training loss. Increasing resolution, hurts the performance on text and interleaved documents, while helping image captioning. The gap stays almost the same on text and interleaved data while slightly increase on image captioning in favor of early fusion." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.615, + 0.483, + 0.75 + ], + "angle": 0, + "content": "the patch size remains constant, increasing the resolution results in a higher number of visual tokens. For all resolutions, we maintain the same number of text tokens. As shown in Figure 17, the early-fusion model consistently outperforms the late-fusion model across resolutions, particularly for multimodal data, with the performance gap widening at higher resolutions. Additionally, we observe that the loss on text and interleaved data increases as resolution increases." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.758, + 0.483, + 0.79 + ], + "angle": 0, + "content": "B.4. Early-fusion is consistently better when matching the late-fusion model size" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.795, + 0.483, + 0.901 + ], + "angle": 0, + "content": "In this section, we compare the late-fusion model with different configurations of early-fusion one. Specifically, we train early-fusion models that match the late-fusion model in total parameters (Params), text model size (Text), and FLOPs (FLOPs), assuming 45-45-10 training mixture. 
As shown in Figure 18, early fusion consistently outperforms late fusion when normalized by total parameters, followed" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.343, + 0.906, + 0.388 + ], + "angle": 0, + "content": "by normalization by FLOPs. When matching the text model size, early fusion performs better at higher ratios of interleaved data." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.397, + 0.816, + 0.413 + ], + "angle": 0, + "content": "B.5. Different late-fusion configuration" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.418, + 0.907, + 0.555 + ], + "angle": 0, + "content": "We examine how this scaling changes with different late-fusion configurations. Instead of scaling both the vision and text models equally, as done in the main paper, we fix the vision encoder size to 300M and scale only the text model. Figure 19 shows that late-fusion models lag behind at smaller model sizes, with the gap closing significantly as the text model scales. This suggests that allocating more parameters to shared components is more beneficial, further supporting the choice of early-fusion models." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.562, + 0.743, + 0.579 + ], + "angle": 0, + "content": "B.6. Different context lengths" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.584, + 0.906, + 0.706 + ], + "angle": 0, + "content": "In the paper, we use a 1k context length following [31]. Also following, this paper, we ignore the context length effect, as the model dimension dominates the training compute estimate. Moreover, [53] empirically found that scaling coefficients are robust to context length. Nevertheless, Our initial experiments (Figure 20) indicate that scaling the context length did not significantly affect the comparison between late and early fusion." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.713, + 0.806, + 0.729 + ], + "angle": 0, + "content": "B.7. 
Initializing from LLM and CLIP" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.907, + 0.901 + ], + "angle": 0, + "content": "We study the case where both late and early fusion models are initialized from pre-trained models, specifically DCLM-1B [39] and CLIP-ViT-L [55] for late fusion. Interestingly, Figure 21 shows that for text and interleaved multimodal documents, early fusion can match the performance of late fusion when trained for longer. However, closing the gap on image caption data remains more challenging. Notably, when considering the overall training cost, including that of pre-trained models, early fusion requires significantly longer training to compensate for the vision encoder's pretraining cost." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.107, + 0.09, + 0.355, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.089, + 0.648, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.658, + 0.087, + 0.882, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.264, + 0.676, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.292, + 0.908, + 0.335 + ], + "angle": 0, + "content": "Figure 18. Early vs late fusion: changing the training mixture and early-fusion configuration. We vary the training mixtures and plot the final training loss for different configuration of early fusion models. For the same number of total parameters early fusion consistently outperform late fusion." 
+ }, + { + "type": "image", + "bbox": [ + 0.095, + 0.351, + 0.355, + 0.542 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.353, + 0.627, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.353, + 0.898, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.544, + 0.736, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.601, + 0.908, + 0.644 + ], + "angle": 0, + "content": "Figure 19. Early vs late fusion: scaling training FLOPs while fixing the vision encoder size. We compare early and late fusion models when scaling both the amount of training tokens and model sizes. For late fusion mdoels, we fix the vision encoder size (300M) and scale the text model (250M, 834M, 2B, 3B). The gap between early and late get tighter when scaling the text model." + }, + { + "type": "image", + "bbox": [ + 0.155, + 0.669, + 0.465, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.095, + 0.863, + 0.48, + 0.878 + ], + "angle": 0, + "content": "Figure 20. Early vs late fusion with different context lengths." + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.672, + 0.909, + 0.813 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.818, + 0.908, + 0.874 + ], + "angle": 0, + "content": "Figure 21. Early vs late fusion when initializing the encoder and decoder. Early-fusion can match the performance of late-fusion models when trained for longer. However, the gap is bigger on image-caption data." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.09, + 0.226, + 0.108 + ], + "angle": 0, + "content": "C. 
Scaling laws" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.114, + 0.283, + 0.132 + ], + "angle": 0, + "content": "C.1. Fitting \\(L = F(N,D)\\)" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.137, + 0.483, + 0.168 + ], + "angle": 0, + "content": "Following [26], we determine the parameters that minimize the following objective across all our runs \\( i \\):" + }, + { + "type": "equation", + "bbox": [ + 0.094, + 0.174, + 0.483, + 0.21 + ], + "angle": 0, + "content": "\\[\n\\min _ {a, b, e, \\alpha , \\beta} \\sum_ {i} \\operatorname {H u b e r} _ {\\delta} \\left(\\operatorname {L S E} \\left(a - \\alpha \\log N _ {i}, b - \\beta \\log D _ {i}, e\\right) - \\log L _ {i}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.211, + 0.484, + 0.301 + ], + "angle": 0, + "content": "We perform this optimization across various initialization ranges and select the parameters that achieve the lowest loss across all initializations. Specifically, our grid search spans \\(\\{0, 0.5, 2.5\\}\\) for \\(\\alpha\\) and \\(\\beta\\), \\(\\{0, 5, 10, \\dots, 30\\}\\) for \\(a\\) and \\(b\\), and \\(\\{-1, -0.5, 1, 0.5\\}\\) for \\(e\\). We use the L-BFGS algorithm with \\(\\delta = 1e - 3\\)." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.311, + 0.379, + 0.328 + ], + "angle": 0, + "content": "C.2. Fitting \\(N \\propto C^{a}, D \\propto C^{b}, D \\propto N^{d}\\)" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.333, + 0.485, + 0.697 + ], + "angle": 0, + "content": "While these equations have a closed-form solution [26] for early-fusion models that can be derived from Eq 1, this is not the case for late-fusion models without specifying either the vision encoder or text model size. To ensure a fair comparison, we derive these equations for both models, by performing linear regression in log space. We found that the regression is very close to the coefficient found with closed-form derivation Table 9. 
For instance, to derive \\( N = K_{a}C^{a} \\), given a FLOP budget \\( C \\) and a set of linearly spaced tokens \\( D_{i} \\) ranging from 10B to 600B, we compute the model size for each \\( D_{i} \\) as \\( N_{i} = \\frac{C}{6D} \\) for early fusion and \\( N_{i} = \\frac{C}{6D} + 0.483 * N_{v} \\) for late fusion (for the 45-45-10 mixture, \\( D_{v} = 0.544D \\), thus \\( C = 6D(0.544N_{v} + N_{t}) \\)). We then apply Eq 1 to obtain the loss for each model size and select \\( N \\) that has the minimum loss. We repeat this for all FLOP values corresponding to our runs, resulting in a set of points \\( (C, N_{opt}) \\) that we use to regress \\( a \\) and \\( K_{a} \\). We follow a similar procedure to find \\( b \\) and \\( d \\). For late-fusion models, we regress a linear model to determine \\( N_{v} \\) given \\( N \\). Notably, even though we maintain a fixed width ratio for late-fusion models, this approach is more accurate, as embedding layers prevent a strictly fixed ratio between text and vision model sizes. We present the regression results in Figure 22." + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.711, + 0.465, + 0.763 + ], + "angle": 0, + "content": "
Modelabdndn
Closed form0.526490.473510.899381.11188-0.05298
Regression0.523910.475340.900521.10224-0.04933
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.763, + 0.483, + 0.806 + ], + "angle": 0, + "content": "Table 9. Scaling laws parameters for early-fusion. Doing regression to derive the scaling laws coefficients leads to very close results to using the closed-form solution." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.833, + 0.242, + 0.85 + ], + "angle": 0, + "content": "C.3. Fitting \\(L \\propto C^c\\)" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.484, + 0.902 + ], + "angle": 0, + "content": "To determine the relationship between the final model loss and the compute budget \\( C \\), we begin by interpolating the points corresponding to the same model size and compute" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.213 + ], + "angle": 0, + "content": "the convex hull that covers the minimum loss achieved by all runs for each FLOP. This results in a continuous mapping from the FLOPs to the lowest loss. We consider a range of FLOPs, excluding very small values \\((\\leq 3e^{19})\\), and construct a dataset of \\((C,L)\\) for linearly spaced compute \\(C\\). Using this data, we find the linear relationship between \\(L\\) and \\(C\\) in the log space and deduce the exponent \\(c\\). We visualize the results in Figure 26." 
+ }, + { + "type": "image", + "bbox": [ + 0.519, + 0.23, + 0.707, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.332, + 0.637, + 0.342 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image", + "bbox": [ + 0.715, + 0.229, + 0.892, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.81, + 0.332, + 0.822, + 0.342 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.348, + 0.716, + 0.447 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.634, + 0.451, + 0.646, + 0.46 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image", + "bbox": [ + 0.716, + 0.348, + 0.89, + 0.448 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.807, + 0.451, + 0.82, + 0.46 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.462, + 0.746, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.646, + 0.563, + 0.659, + 0.573 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image", + "bbox": [ + 0.747, + 0.462, + 0.897, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.817, + 0.563, + 0.829, + 0.573 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.593, + 0.907, + 0.634 + ], + "angle": 0, + "content": "Figure 22. Regression results of the scaling laws coefficients. our estimation of the scaling coefficients is close to the closed form solution." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.667, + 0.874, + 0.684 + ], + "angle": 0, + "content": "C.4. Scaling laws for different target data type" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.689, + 0.907, + 0.901 + ], + "angle": 0, + "content": "In Figure 27, we derive the scaling laws for different target data types. 
In general, we observe that the model learns image captioning faster than interleaved data, as indicated by the higher absolute value of the scaling exponent (e.g., 0.062 vs 0.046), despite using the same data ratio for captioning and interleaved data (45% each). Additionally, we find that the model learns more slowly on text-only data, likely due to the smaller amount of text-only data (10%). Across model configurations, we find that early fusion scales similarly to late fusion on image captioning but has a lower multiplicative constant (49.99 vs 47.97). For MoEs, the model learns faster but exhibits a higher multiplicative constant. On text and interleaved data, early and late fusion models scale similarly and achieve comparable" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.136, + 0.086, + 0.511, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.087, + 0.891, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.343, + 0.905, + 0.358 + ], + "angle": 0, + "content": "Figure 23. Observed vs predicted loss. We visualize the loss predicted by our scaling laws (Eq 1) and the actual loss achieved by each run." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.387, + 0.483, + 0.418 + ], + "angle": 0, + "content": "performance. However, MoEs demonstrate better overall performance while learning slightly more slowly." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.43, + 0.463, + 0.446 + ], + "angle": 0, + "content": "C.5. Scaling laws for different training mixtures" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.453, + 0.483, + 0.651 + ], + "angle": 0, + "content": "We investigate how the scaling laws change when modifying the training mixtures. 
Specifically, we vary the ratio of image caption, interleaved, and text-only data and report the results in Figure 28. Overall, we observe similar scaling trends, with only minor changes in the scaling coefficients. Upon closer analysis, we find that increasing the ratio of a particular data type in the training mixture, leads to a corresponding increase in its scaling exponent. For instance, increasing the ratio of image captions from \\(30\\%\\) to \\(40\\%\\) raises the absolute value of the exponent from 0.056 to 0.061. However, for text-only data, we do not observe significant changes in the scaling coefficients when varying its proportion in the training mixture." + }, + { + "type": "table", + "bbox": [ + 0.154, + 0.667, + 0.426, + 0.719 + ], + "angle": 0, + "content": "
ParameterMSER2MAE (%)
Held-in0.00290.98070.8608
Held-out0.00040.96820.5530
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.722, + 0.483, + 0.765 + ], + "angle": 0, + "content": "Table 10. Scaling laws prediction errors. We report the mean square error, R2 and mean absolute error for the loss prediction for held-in and held-out (8B model) data." + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.801, + 0.465, + 0.849 + ], + "angle": 0, + "content": "
ModelEαβabd
Avg1.809220.298420.332090.543020.483010.92375
Std0.338110.101010.028920.088130.057870.23296
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.85, + 0.483, + 0.879 + ], + "angle": 0, + "content": "Table 11. Scaling laws sensitivity. We report the mean and standard deviation after bootstrapping with 100 iterations." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.386, + 0.733, + 0.401 + ], + "angle": 0, + "content": "C.6. Scaling laws evaluation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.409, + 0.907, + 0.56 + ], + "angle": 0, + "content": "For each model size and number of training tokens, we compute the loss using the estimated functional form in Eq 1 and compare it to the actual loss observed in our runs. Figure 23, Figure 24, and Table 10 visualizes these comparisons, showing that our estimation is highly accurate, particularly for lower loss values and larger FLOPs. We also assess our scaling laws in an extrapolation setting, predicting performance beyond the model sizes used for fitting. Notably, our approach estimates the performance of an 8B model with reasonable accuracy." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.562, + 0.909, + 0.7 + ], + "angle": 0, + "content": "Additionally, we conduct a sensitivity analysis using bootstrapping. Specifically, we sample \\(P\\) points with replacement (\\(P\\) being the total number of trained models) and re-estimate the scaling law coefficients. This process is repeated 100 times, and we report the mean and standard deviation of each coefficient. Table 11 shows that our estimation is more precise for \\(\\beta\\) than for \\(\\alpha\\), primarily due to the smaller number of model sizes relative to the number of different token counts used to derive the scaling laws." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.71, + 0.792, + 0.727 + ], + "angle": 0, + "content": "C.7. Scaling laws for sparse NMMs." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.733, + 0.907, + 0.87 + ], + "angle": 0, + "content": "Similar to dense models, we fit a parametric loss function (Eq 1) to predict the loss of sparse NMMs based on the number of parameters and training tokens, replacing the total parameter count with the number of active parameters. While incorporating sparsity is standard when deriving scaling laws for MoEs [2, 33, 74], we focus on deriving scaling laws specific to the sparsity level used in our MoE setup. This yields coefficients that are implicitly conditioned on the sparsity configuration." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.871, + 0.909, + 0.903 + ], + "angle": 0, + "content": "We also experiment with a sparsity-aware formulation of the scaling law as proposed in [2], and observe consistent" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.108, + 0.087, + 0.473, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.092, + 0.322, + 0.483, + 0.379 + ], + "angle": 0, + "content": "Figure 24. Observed vs predicted loss. We visualize the loss predicted by our scaling laws Eq 1 and the actual loss achieved by each run. We can reliably predict the performance of models larger (8B params) than those used to fit the scaling laws." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.383, + 0.484, + 0.475 + ], + "angle": 0, + "content": "trends (Table 12). In particular, the exponents associated with model size \\((N)\\) are substantially larger than those for training tokens \\((\\beta)\\), reinforcing the importance of scaling model size in sparse architectures. Additionally, we observe that the terms governing the scaling of active parameters decompose into two components." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.492, + 0.348, + 0.509 + ], + "angle": 0, + "content": "D. 
Discussion and Limitations" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.519, + 0.484, + 0.625 + ], + "angle": 0, + "content": "Scaling laws for multimodal data mixtures. Our scaling laws study spans different model configurations and training mixtures. While results suggest that the scaling law coefficients remain largely consistent across mixtures, a broader exploration of mixture variations is needed to validate this observation and establish a unified scaling law that accounts for this factor." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.627, + 0.483, + 0.717 + ], + "angle": 0, + "content": "Scaling laws and performance on downstream tasks. Similar to previous scaling law studies, our analysis focuses on pretraining performance as measured by the validation loss. However, the extent to which these findings translate to downstream performance remains an open question and requires further investigation." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.718, + 0.483, + 0.809 + ], + "angle": 0, + "content": "Extrapolation to larger scales. The accuracy of scaling law predictions improves with increasing FLOPs Appendix C. Furthermore, we validate our laws when extrapolating to larger model sizes (Appendix C.6). However, whether these laws can be reliably extrapolated to extremely large model sizes remains an open question." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.484, + 0.903 + ], + "angle": 0, + "content": "High resolution and early-fusion models. Training early-fusion models with high-resolution inputs leads to a significant increase in vision tokens. While pooling techniques have been widely adopted for late-fusion models, alternative approaches may be necessary for early fusion. Given the similarity of early-fusion models to LLMs, it appears" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.905, + 0.121 + ], + "angle": 0, + "content": "that techniques for extending context length could be beneficial." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.121, + 0.907, + 0.243 + ], + "angle": 0, + "content": "Scaling laws for multimodal MoEs models. For MoEs, we consider only a single configuration (top-1 routing with 8 experts). We found this configuration to work reasonably well in our setup, and follow a standard MoEs implementation. However, the findings may vary when optimizing more the MoE architecture or exploring different load-balancing, routing strategies or different experts implementations." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.256, + 0.905, + 0.291 + ], + "angle": 0, + "content": "E. Mixture of experts and modality-specific specialization" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.298, + 0.707, + 0.314 + ], + "angle": 0, + "content": "E.1. MoEs configuration" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.32, + 0.907, + 0.368 + ], + "angle": 0, + "content": "We experiment with different MoEs configuration by changing the number of experts and the top-k. We report a sample of these experiments in Table 13." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.375, + 0.707, + 0.391 + ], + "angle": 0, + "content": "E.2. MoEs specialization" + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.412, + 0.845, + 0.564 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.568, + 0.907, + 0.61 + ], + "angle": 0, + "content": "Figure 25. Modality-specific specialization. We visualize the experts specialization to text and image modalities. Models are evaluated on Obelics." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.909, + 0.842 + ], + "angle": 0, + "content": "We investigate multimodal specialization in MoE architectures. We compute a specialization score as the average difference between the number of text/images tokens assigned to each expert and a uniform assignment \\((1 / E)\\). 
Additionally, we visualize the normalized number of text and image tokens assigned to each expert across layers. Figure 25 shows clear modality-specific experts, particularly in the early layers. Furthermore, the specialization score decreases as the number of layers increases but rises again in the very last layers. This suggests that early and final layers require more modality specialization compared to mid-layers. Additionally, we observe several experts shared between text and image modalities, a phenomenon not present in hard-routed or predefined modality-specific experts." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.137, + 0.131, + 0.866, + 0.209 + ], + "angle": 0, + "content": "
L(N,D) = E + A/Nα + B/DβvsL(N,D,S) = A/Nα + B/Dβ + C(1-S)λ + d(1-S)δNγ
ModelEABαβλδγCd
L(N,D) (Eq 1)2.15838177346590.7100.372-----
L(N,D,S) [2]1.0788146600.58900.37200.20.20.709561.0788381475
" + }, + { + "type": "table_caption", + "bbox": [ + 0.309, + 0.21, + 0.687, + 0.224 + ], + "angle": 0, + "content": "Table 12. Scaling laws for sparse native multimodal models." + }, + { + "type": "table", + "bbox": [ + 0.137, + 0.324, + 0.866, + 0.404 + ], + "angle": 0, + "content": "
AccuracyCIDEr
AVGVQAv2TextVQAOKVQAGQAVizWizCOCOTextCaps
4-E-top-140.055264.06814.28441.94861.4618.51662.20134.08
8-E-top-141.693465.68417.5542.90863.2619.06567.87739.63
8-E-top-242.854666.46619.16245.34463.9419.36165.98841.649
8-E-top-2 finegrained39.90462.7615.5841.8861.617.757.5235.42
" + }, + { + "type": "table_caption", + "bbox": [ + 0.312, + 0.406, + 0.684, + 0.42 + ], + "angle": 0, + "content": "Table 13. SFT results with different MoEs configurations." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.522, + 0.907, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.809, + 0.908, + 0.851 + ], + "angle": 0, + "content": "Figure 26. Scaling laws for native multimodal models. From left to right: late-fusion (dense), early-fusion (dense) and early-fusion MoEs. The scaling exponents are very close for all models. However, MoEs leads to overall lower loss (smaller multiplicative constant) and takes longer to saturate." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.142, + 0.355, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.142, + 0.622, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.167, + 0.891, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.359, + 0.35, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.359, + 0.622, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.637, + 0.36, + 0.892, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.553, + 0.365, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.553, + 0.634, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.552, + 0.903, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.247, + 0.752, + 0.752, + 0.802 + ], + "angle": 0, + "content": "
0.289B0.494B1B1.748B2.430B3.714B
0.275B0.464B0.932B1.627B2.280B3.354B
0.275B0.464B0.932B1.627B2.280B3.354B
" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.817, + 0.907, + 0.845 + ], + "angle": 0, + "content": "Figure 27. Scaling laws for native multimodal models. From top to bottom: late-fusion (dense), early-fusion (dense) and early-fusion MoEs. From left to right: cross-entropy on the validation set of image-caption, interleaved and text-only data." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.482, + 0.096, + 0.536, + 0.108 + ], + "angle": 0, + "content": "45-45-10" + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.117, + 0.35, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.366, + 0.117, + 0.621, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.118, + 0.89, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.485, + 0.289, + 0.534, + 0.301 + ], + "angle": 0, + "content": "40-20-40" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.309, + 0.349, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.31, + 0.62, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.31, + 0.89, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.485, + 0.482, + 0.534, + 0.493 + ], + "angle": 0, + "content": "30-30-40" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.502, + 0.349, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.501, + 0.62, + 0.666 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.502, + 0.89, + 0.666 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.485, + 0.674, + 0.534, + 0.686 + ], + "angle": 0, + "content": "20-40-40" + }, + 
{ + "type": "image", + "bbox": [ + 0.095, + 0.695, + 0.349, + 0.858 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.695, + 0.62, + 0.858 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.695, + 0.89, + 0.858 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.252, + 0.866, + 0.756, + 0.887 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.895, + 0.907, + 0.923 + ], + "angle": 0, + "content": "Figure 28. Scaling laws for early-fusion native multimodal models. Our runs across different training mixtures (Image-caption-Interleaved-Text) and FLOPs. We visualize the final validation loss on 3 data types: HQITP (left), Obelics (middle) and DCLM (right)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.936 + ], + "angle": 0, + "content": "22" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_origin.pdf b/data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..10b5f5e0387efd0db6f3ffce613b5dab2fad9c95 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b847b7773af318736aca0b86d2cae77cfb60535aa01da672ef9577890d41dbf6 +size 856853 diff --git a/data/2025/2504_07xxx/2504.07951/full.md b/data/2025/2504_07xxx/2504.07951/full.md new file mode 100644 index 0000000000000000000000000000000000000000..a77aa604bdf29b0145554c32bf941190e20db4ed --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/full.md @@ -0,0 +1,650 @@ +# Scaling Laws for Native Multimodal Models + +Mustafa Shukor² + +Enrico Fini + +Victor Guilherme Turrisi da Costa1 + +Matthieu Cord² + +Joshua Susskind + +Alaaeldin El-Nouby + +1Apple + +$^{2}$ Sorbonne 
University + +# Abstract + +Building general-purpose models that can effectively perceive the world through multimodal signals has been a long-standing goal. Current approaches involve integrating separately pre-trained components, such as connecting vision encoders to LLMs and continuing multimodal training. While such approaches exhibit remarkable sample efficiency, it remains an open question whether such late-fusion architectures are inherently superior. In this work, we revisit the architectural design of native multimodal models (NMMs)-those trained from the ground up on all modalities—and conduct an extensive scaling laws study, spanning 457 trained models with different architectures and training mixtures. Our investigation reveals no inherent advantage to late-fusion architectures over early-fusion ones, which do not rely on image encoders or tokenizers. On the contrary, early-fusion exhibits stronger performance at lower parameter counts, is more efficient to train, and is easier to deploy. Motivated by the strong performance of the early-fusion architectures, we show that incorporating Mixture of Experts (MoEs) allows models to learn modality-specific weights, significantly benefiting performance. + +# 1. Introduction + +Multimodality provides a rich signal for perceiving and understanding the world. Advances in vision [23, 52, 55, 80] and language models [3, 19, 67] have enabled the development of powerful multimodal models that understand language, images, and audio. A common approach involves grafting separately pre-trained unimodal models, such as connecting a vision encoder to the input layer of an LLM [6, 9, 35, 43, 62, 64, 73, 78]. + +Although this seems like a convenient approach, it remains an open question whether such late-fusion strategies are inherently optimal for understanding multimodal signals. 
Moreover, with abundant multimodal data available, initializing from unimodal pre-training is potentially detrimental, as it may introduce biases that prevent the model + +![](images/4e86f0374868e29a9f27723f7ab7e8a000db7028e3af86023ac4513fa225732c.jpg) + +![](images/aa4d2e50304f1b35bc419434fa34759b233eea950df0d8cf73344bc441f1cd30.jpg) +FLOPs +FLOPs +Figure 1. Scaling properties of Native Multimodal Models. Based on the scaling laws study in § 3.1, we observe: (1) early and late fusion models provide similar validation loss $L$ when trained with the same compute budget $C$ (FLOPs); (2) This performance is achieved via a different trade-off between parameters $N$ and number of training tokens $D$ , where early-fusion models require fewer parameters. (3) Sparse early-fusion models achieve lower loss and require more training tokens for a given FLOP budget. + +from fully leveraging cross-modality co-dependancies. An additional challenge is scaling such systems; each component (e.g., vision encoder, LLM) has its own set of hyperparameters, pre-training data mixtures, and scaling properties with respect to the amount of data and compute applied. A more flexible architecture might allow the model to dynamically allocate its capacity across modalities, simplifying scaling efforts. + +In this work, we focus on the scaling properties of native multimodal models trained from the ground up on multimodal data. We first investigate whether the commonly adopted late-fusion architectures hold an intrinsic advantage by comparing them to early-fusion models, which process raw multimodal inputs without relying on dedicated vision encoders. We conduct scaling experiments on early and late fusion architectures, deriving scaling laws to pre + +dict their performance and compute-optimal configurations. Our findings indicate that late fusion offers no inherent advantage when trained from scratch. Instead, early-fusion models are more efficient and are easier to scale. 
Furthermore, we observe that native multimodal models follow scaling laws similar to those of LLMs [26], albeit with slight variations in scaling coefficients across modalities and datasets. Our results suggest that model parameters and training tokens should be scaled roughly equally for optimal performance. Moreover, we find that different multimodal training mixtures exhibit similar overall trends, indicating that our findings are likely to generalize to a broader range of settings. + +While our findings favor early fusion, multimodal data is inherently heterogeneous, suggesting that some degree of parameter specialization may still offer benefits. To investigate this, we explore leveraging Mixture of Experts (MoEs) [59], a technique that enables the model to dynamically allocate specialized parameters across modalities in a symmetric and parallel manner, in contrast to late-fusion models, which are asymmetric and process data sequentially. Training native multimodal models with MoEs results in significantly improved performance and therefore, faster convergence. Our scaling laws for MoEs suggest that scaling number of training tokens is more important than the number of active parameters. This unbalanced scaling is different from what is observed for dense models, due to the higher number of total parameters for sparse models. In addition, Our analysis reveals that experts tend to specialize in different modalities, with this specialization being particularly prominent in the early and last layers. + +# 1.1. Summary of our findings + +Our findings can be summarized as follows: + +Native Early and Late fusion perform on par: Early fusion models trained from scratch perform on par with their late-fusion counterparts, with a slight advantage to early-fusion models for low compute budgets (Figure 3). 
Furthermore, our scaling laws study indicates that the compute-optimal models for early and late fusion perform similarly as the compute budget increases (Figure 1 Top). + +NMMs scale similarly to LLMs: The scaling laws of native multimodal models follow similar laws as text-only LLMs with slightly varying scaling exponents depending on the target data type and training mixture (Table 2). + +Late-fusion requires more parameters: Compute-optimal late-fusion models require a higher parameters-to-data ratio when compared to early-fusion (Figure 1 bottom). + +Sparsity significantly benefits early-fusion NMMs: Sparse NMMs exhibit significant improvements compared to their dense counterparts at the same inference cost (Figure 10). Furthermore, they implicitly learn modality-specific weights when trained with sparsity (Figure 12). In + +
ExpressionDefinition
NNumber of parameters in the multimodal decoder. For MoEs this refers to the active parameters only.
DTotal number of multimodal tokens.
NvNumber of parameters in the vision-specific encoder. Only exists in late-fusion architectures.
DvNumber of vision-only tokens.
CTotal number of FLOPs, estimated as C = 6ND for early-fusion and C = 6(NvDv + ND) for late-fusion.
LValidation loss measured as the average over interleaved image-text, image-caption, and text-only data mixtures.
+ +Table 1. Definitions of the expressions used throughout the paper. + +addition, compute-optimal models rely more on scaling the number of training tokens than the number of active parameters as the compute-budget grows (Figure 1 Bottom). + +Modality-agnostic routing beats Modality-aware routing for Sparse NMMs: Training sparse mixture of experts with modality-agnostic routing consistently outperforms models with modality-aware routing (Figure 11). + +# 2. Preliminaries + +# 2.1. Definitions + +Native Multimodal Models (NMMs): Models that are trained from scratch on all modalities simultaneously without relying on pre-trained LLMs or vision encoders. Our focus is on the representative image and text modalities, where the model processes both text and images as input and generates text as output. + +Early fusion: Enabling multimodal interaction from the beginning, using almost no modality-specific parameters (e.g., except a linear layer to patchify images). Using a single transformer model, this approach processes raw multimodal input—tokenized text and continuous image patches—with no image discretization. In this paper, we refer to the main transformer as the decoder. + +Late fusion: Delaying the multimodal interaction to deeper layers, typically after separate unimodal components has processed that process each modality independently (e.g., a vision encoder connected to a decoder). + +Modality-agnostic routing: In sparse mixture-of-experts, modality-agnostic routing refers to relying on a learned router module that is trained jointly with the model. + +Modality-aware routing: Routing based on pre-defined rules such as routing based on the modality type (e.g., vision-tokens, token-tokens). + +# 2.2. Scaling Laws + +We aim to understand the scaling properties of NMMs and how different architectural choices influence trade-offs. To this end, we analyze our models within the scaling laws framework proposed by Hoffmann et al. [26], Kaplan et al. [31]. 
We compute FLOPs based on the total number of parameters, using the approximation $C = 6ND$ , as adopted in prior work [2, 26]. However, we modify this estimation to suit our setup: for late-fusion models, FLOPs is computed + +![](images/849e76211de227d7564ee66c874ace735709b77d3978a5168211969bf19288d8.jpg) +Figure 2. Scaling laws for early-fusion and late-fusion native multimodal models. Each point represents a model (300M to 3B parameters) trained on varying number of tokens (250M to 400B). We report the average cross-entropy loss on the validation sets of interleaved (Obelics), Image-caption (HQITP), and text-only data (DCLM). + +as $6(N_{v}D_{v} + ND)$ . We consider a setup where, given a compute budget $C$ , our goal is to predict the model's final performance, as well as determine the optimal number of parameters or number of training tokens. Consistent with prior studies on LLM scaling [26], we assume a power-law relationship between the final model loss and both model size $(N)$ and training tokens $(D)$ : + +$$ +L = E + \frac {A}{N ^ {\alpha}} + \frac {B}{D ^ {\beta}}. \tag {1} +$$ + +Here, $E$ represents the lowest achievable loss on the dataset, while $\frac{A}{N^{\alpha}}$ captures the effect of increasing the number of parameters, where a larger model leads to lower loss, with the rate of improvement governed by $\alpha$ . Similarly, $\frac{B}{D^{\beta}}$ accounts for the benefits of a higher number of tokens, with $\beta$ determining the rate of improvement. Additionally, we assume a linear relationship between compute budget (FLOPs) and both $N$ and $D$ ( $C \propto ND$ ). This further leads to power-law relationships detailed in Appendix C.7. + +# 2.3. Experimental setup + +Our models are based on the autoregressive transformer architecture [71] with SwiGLU FFNs [58] and QK-Norm [17] following Li et al. [39]. 
In early-fusion models, image patches are linearly projected to match the text token dimension, while late-fusion follows the CLIP architecture [55]. We adopt causal attention for text tokens and bidirectional attention for image tokens, we found this to work better. Training is conducted on a mixture of public and private multimodal datasets, including DCLM [39], Obelics [34], DFN [21], COYO [11], and a private collection of High-Quality Image-Text Pairs (HQITP). Images are resized to $224 \times 224$ resolution with a $14 \times 14$ patch size. We use a context length of 1k for the multimodal sequences. For training efficiency, we train our models with bfloat16, Fully Sharded Data Parallel (FSDP) [82], activation checkpointing, and gradient accumulation. We also use se + +
L = E + A/Nα + B/DβN ∝ CaD ∝CbL ∝CcD ∝Nd
ModelDataEαβabcd
GPT3 [10]Text------0.048
Chinchilla [26]Text1.6930.3390.2850.460.54-
NMM (early-fusion)Text2.2220.30840.33750.52460.4774-0.04200.9085
Image-Caption1.5690.31110.33860.52030.4785-0.06100.9187
Interleaved1.9660.29710.3380.53150.4680-0.04590.8791
AVG1.9040.3010.3350.52620.473-0.04920.8987
NMM (late-fusion)AVG1.8910.29030.33830.63580.4619-0.04940.6732
Sparse NMM (early-fusion)AVG2.1580.7100.3720.3610.656-0.0471.797
+ +Table 2. Scaling laws for native multimodal models. We report the scaling laws results for early and late fusion models. We fit the scaling laws for different target data types as well as their average loss (AVG). + +quence packing for the image captioning dataset to reduce the amount of padded tokens. Similar to previous works [2, 5, 26], we evaluate performance on held-out subsets of interleaved (Obelics), Image-caption (HQITP), and text-only data (DCLM). Further implementation details are provided in Appendix A. + +# 3. Scaling native multimodal models + +In this section, we present a scaling laws study of native multimodal models, examining various architectural choices § 3.1, exploring different data mixtures § 3.2, analyzing the practical trade-offs between late and early fusion NMMs, and comparing the performance of native pretraining and continual pre-training of NMMs § 3.3. + +Setup. We train models ranging from 0.3B to 4B active parameters, scaling the width while keeping the depth constant. For smaller training token budgets, we reduce the warm-up phase to 1K steps while maintaining 5K steps for larger budgets. Following Hagele et al. [25], models are trained with a constant learning rate, followed by a cooldown phase using an inverse square root scheduler. The cool-down phase spans $20\%$ of the total steps spent at the constant learning rate. To estimate the scaling coefficients in Eq 1, we apply the L-BFGS algorithm [51] and Huber loss [28] (with $\delta = 10^{-3}$ ), performing a grid search over initialization ranges. + +# 3.1. Scaling laws of NMMs + +Scaling laws for early-fusion and late-fusion models. Figure 2 (left) presents the final loss averaged across interleaved, image-caption, and text datasets for early-fusion NMMs. The lowest-loss frontier follows a power law as a function of FLOPs. Fitting the power law yields the expression $L \propto C^{-0.049}$ , indicating the rate of improvement with increasing compute. 
When analyzing the scaling laws per data type (e.g., image-caption, interleaved, text), we observe that the exponent varies (Table 2). For instance, the model achieves a higher rate of improvement for image- + +![](images/dc53bc628df5feb99518e025ca084ba7b9428638cf809cc0d413eaca42641103.jpg) + +![](images/87949d3241155096a62e02bbef0b483bc4abd62f02b7c47214c9da40da229f94.jpg) + +![](images/ea002e02fed450556ae2ce5afb3686df9c86c34add67acb163233c6b6af4e322.jpg) + +![](images/4133a9cbf5fb81e279a53a22c25cc8e1a31cad79eb8d7f96cf9c1d0cbaf75ebd.jpg) +Figure 3. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the number of model parameters and the number of training tokens. Overall, early fusion shows a slight advantage, especially at smaller model sizes, and the gap decreases when scaling the number of parameters $N$ . + +caption data $(L\propto C^{-0.061})$ when compared to interleaved documents $(L\propto C^{-0.046})$ + +To model the loss as a function of the number of training tokens $D$ and model parameters $N$ , we fit the parametric function in Eq 1, obtaining scaling exponents $\alpha = 0.301$ and $\beta = 0.335$ . These describe the rates of improvement when scaling the number of model parameters and training tokens, respectively. Assuming a linear relationship between compute, $N$ , and $D$ (i.e., $C \propto ND$ ), we derive the law relating model parameters to the compute budget (see Appendix C for details). Specifically, for a given compute budget $C$ , we compute the corresponding model size $N$ at logarithmically spaced $D$ values and determine $N_{opt}$ , the parameter count that minimizes loss. Repeating this across different FLOPs values produces a dataset of $(C, N_{opt})$ , to which we fit a power law predicting the compute-optimal model size as a function of compute: $N^{*} \propto C^{0.526}$ . 
+ +Similarly, we fit power laws to estimate the compute-optimal training dataset size as a function of compute and model size: + +$$ +D _ {o p t} \propto C ^ {0. 4 7 3}, D _ {o p t} \propto N ^ {0. 8 9 9}. +$$ + +These relationships allow practitioners to determine the optimal model and dataset size given a fixed compute budget. When analyzing by data type, we find that interleaved data benefits more from larger models ( $a = 0.532$ ) compared to image_caption data ( $a = 0.520$ ), whereas the opposite trend holds for training tokens. + +We conduct a similar study on late-fusion models in Figure 2 (right) and observe comparable scaling behaviors. In particular, the loss scaling exponent $(c = -0.0494)$ is nearly identical to that of early fusion $(c = -0.0492)$ . This trend is evident in Figure 3, where early fusion outperforms late fusion at smaller model scales, while both architectures converge to similar performance at larger model sizes. We also observe similar trends when varying late-fusion con + +![](images/e2db9e4bc14538474e23bf3e8a07a771abc4e4bc5ddfb8611c4bfdfe5adf2479.jpg) +Figure 4. Early vs late: pretraining efficiency. Early-fusion is faster to train and consumes less memory. Models are trained on 16 H100 GPUs for 160k steps (300B tokens). + +![](images/7f7757b2858d9f840bbcacbd8c46970472f92633800743ce8acb9dab1d537e65.jpg) + +figurations, such as using a smaller vision encoder with a larger text decoder Appendix B. + +Scaling laws of NMMs vs LLMs. Upon comparing the scaling law coefficients of our NMMs to those reported for text-only LLMs (e.g., GPT-3, Chinchilla), we find them to be within similar ranges. In particular, for predicting the loss as a function of compute, GPT-3 [10] follows $L \propto C^{-0.048}$ , while our models follow $L \propto C^{-0.049}$ , suggesting that the performance of NMMs adheres to similar scaling laws as LLMs. 
Similarly, our estimates of the $\alpha$ and $\beta$ parameters in Eq 1 ( $\alpha = 0.301$ , $\beta = 0.335$ ) closely match those reported by Hoffmann et al. [26] ( $\alpha = 0.339$ , $\beta = 0.285$ ). Likewise, our computed values of $a = 0.526$ and $b = 0.473$ align closely with $a = 0.46$ and $b = 0.54$ from [26], reinforcing the idea that, for native multimodal models, the number of training tokens and model parameters should be scaled proportionally. However, since the gap between $a$ and $b$ is smaller than in LLMs, this principle holds even more strongly for NMMs. Additionally, as $a = 0.526$ is greater than $b = 0.473$ in our case, the optimal model size for NMMs is larger than that of LLMs, + +![](images/2c3c0faf8744d893068c96328767e8b3cb6814d50ea95a957ae80e790bdaa99e.jpg) +Figure 5. Scaling laws with different training mixtures. Early-fusion models follow similar scaling trends when changing the pretraining mixtures. However, increasing the image captions leads to a higher scaling exponent norm (see Table 3). + +
| # | C-I-T (%) | I/T ratio | E | α | β | a | b | d | c |
| 1 | 45-45-10 | 1.19 | 1.906 | 0.301 | 0.335 | 0.527 | 0.474 | 0.901 | -0.0492 |
| 2 | 40-20-40 | 0.65 | 1.965 | 0.328 | 0.348 | 0.518 | 0.486 | 0.937 | -0.0486 |
| 3 | 30-30-40 | 0.59 | 1.847 | 0.253 | 0.338 | 0.572 | 0.428 | 0.748 | -0.0463 |
| 4 | 20-40-40 | 0.49 | 1.836 | 0.259 | 0.354 | 0.582 | 0.423 | 0.726 | -0.0488 |
+ +Table 3. Scaling laws for different training mixtures. Early-fusion models. C-I-T refer to image-caption, interleaved and text + +while the optimal number of training tokens is lower, given a fixed compute budget. + +Compute-optimal trade-offs for early vs. late fusion NMMs. While late- and early-fusion models reduce loss at similar rates with increasing FLOPs, we observe distinct trade-offs in their compute-optimal models. Specifically, $N_{opt}$ is larger for late-fusion models, whereas $D_{opt}$ is larger for early-fusion models. This indicates that, given a fixed compute budget, late-fusion models require a higher number of parameters, while early-fusion models benefit more from a higher number of training tokens. This trend is also reflected in the lower $\frac{N_{opt}}{D_{opt}} \propto C^{0.053}$ for early fusion compared to $\frac{N_{opt}}{D_{opt}} \propto C^{0.076}$ for late fusion. As shown in Figure 1 (bottom), when scaling FLOPs, the number of parameters of early fusion models becomes significantly lower, which is crucial for reducing inference costs and, consequently, lowering serving costs after deployment. + +Early-fusion is more efficient to train. We compare the training efficiency of late- and early-fusion architectures. As shown in Figure 4, early-fusion models consume less memory and train faster under the same compute budget. This advantage becomes even more pronounced as compute increases, highlighting the superior training efficiency of early fusion while maintaining comparable performance to late fusion at scale. Notably, for the same FLOPs, late-fusion models have a higher parameter count and higher effective depth (i.e., additional vision encoder layers alongside decoder layers) compared to early-fusion models. + +![](images/20c4e076762da2e42468513928ebe646d8ecfb3fe53aae03d0f904e8b15de96b.jpg) +Figure 7. Early vs late fusion: changing the training mixture. We vary the training mixtures and plot the final training loss. 
Early fusion models attain a favorable performance when increasing the proportion of interleaved documents and text-only data. + +![](images/30f0b77b47bad4996304ae3eb682ca659d3838eb5fdb97502507841b6fa9a450.jpg) + +# 3.2. Scaling laws for different data mixtures + +We investigate how variations in the training mixture affect the scaling laws of native multimodal models. To this end, we study four different mixtures that reflect common community practices [34, 41, 46, 81], with Image Caption-Interleaved-Text ratios of 45-45-10 (our default setup), 30-30-40, 40-20-40, and 20-40-40. For each mixture, we conduct a separate scaling study by training 76 different models, following our setup in § 3.1. Overall, Figure 5 shows that different mixtures follow similar scaling trends; however, the scaling coefficients vary depending on the mixture (Table 3). Interestingly, increasing the proportion of image-caption data (mixtures 1 and 2) leads to lower $a$ and higher $b$ , whereas increasing the ratio of interleaved and text data (mixtures 3 and 4) have the opposite effect. Notably, image-caption data contains more image tokens than text tokens; therefore, increasing its proportion results in more image tokens, while increasing interleaved and text data increases text token counts. This suggests that, when image tokens are prevalent, training for longer decreases the loss faster than increasing the model size. We also found that for a fixed model size, increasing text-only and interleaved data ratio is in favor of early-fusion Figure 7. + +![](images/38e56edac0d3be48f9d017b5be0ecba6837647563e6fba8f0f05f1b2c5885b56.jpg) +Figure 8. Early native vs initializing from LLMs: initializing from pre-trained models and scaling training tokens. We compare training with and without initializing from DCLM-1B. + +# 3.3. Native multimodal pre-training vs. 
continual training of LLMs + +In this section, we compare training natively from scratch to continual training after initializing from a pre-trained LLM. We initialize the model from DCLM-1B [21] that is trained on more than 2T tokens. Figure 8 shows that native multimodal models can close the gap with initialized models when trained for longer. Specifically, on image captioning data, the model requires fewer than 100B multimodal tokens to reach comparable performance. However, on interleaved and text data, the model may need longer training—up to 1T tokens. Considering the cost of pre-training, these results suggest that training natively could be a more efficient approach for achieving the same performance on multimodal benchmarks. + +# 4. Towards multimodal specialization + +Previously, we demonstrated that early-fusion models achieve performance on par with late-fusion models under a fixed compute budget. However, multimodal data is inherently heterogeneous, and training a unified model to fit such diverse distributions may be suboptimal. Here, we argue for multimodal specialization within a unified architecture. Ideally, the model should implicitly adapt to each modality, for instance, by learning modality-specific weights or specialized experts. Mixture of Experts is a strong candidate for this approach, having demonstrated effectiveness in LLMs. In this section, we highlight the advantages of sparse early-fusion models over their dense counterparts. + +Setup. Our sparse models are based on the dropless-MoE implementation of Gale et al. [24], which eliminates token dropping during training caused by expert capacity constraints. We employ a top- $k$ expert-choice routing mechanism, where each token selects its top- $k$ experts among the $E$ available experts. Specifically, we set $k = 1$ and $E = 8$ , as we find this configuration to work effectively. 
Additionally, we incorporate an auxiliary load-balancing loss [59] with a weight of 0.01 to ensure a balanced expert utilization. + +![](images/869818728d380a70e41fbd45b2de162b41240502115758a76d23bbc20a513422.jpg) +Figure 9. Scaling laws for sparse early-fusion NMMs. We report the final validation loss averaged across interleaved, image-captions and text data. + +Following Abnar et al. [2], we compute training FLOPs as $6ND$ , where $N$ represents the number of active parameters. + +# 4.1. Sparse vs dense NMMs when scaling FLOPs + +We compare sparse MoE models to their dense counterparts by training models with different numbers of active parameters and varying amounts of training tokens. Figure 10 shows that, under the same inference cost (or number of active parameters), MoEs significantly outperform dense models. Interestingly, this performance gap is more pronounced for smaller model sizes. This suggests that MoEs enable models to handle heterogeneous data more effectively and specialize in different modalities. However, as dense models become sufficiently large, the gap between the two architectures gradually closes. + +# 4.2. Scaling laws for sparse early-fusion models + +We train different models (ranging from 300M to 3.4B active parameters) on varying amounts of tokens (ranging from 250M to 600B) and report the final loss in Figure 9. We fit a power law to the convex hull of the lowest loss as a function of compute (FLOPs). Interestingly, the exponent $(-0.048)$ is close to that of dense NMMs $(-0.049)$ , indicating that both architectures scale similarly. However, the multiplicative constant is smaller for MoEs (27.086) compared to dense models (29.574), revealing lower loss. Additionally, MoEs require longer training to reach saturation compared to dense models (Appendix C for more details). We also predict the coefficients of Eq 1 by considering $N$ as the number of active parameters. Table 2 shows significantly higher $\alpha$ compared to dense models. 
Interestingly, $b$ is significantly higher than $a$ , revealing that the training tokens should be scaled at a higher rate than the number of parameters when training sparse NMMs. We also experiment with a scaling law that takes into account the sparsity [2] and reached similar conclusions in Appendix C.7. + +# 4.3. Modality-aware vs. Modality-agnostic routing + +Another alternative to MoEs is modality-aware routing, where multimodal tokens are assigned to experts based on + +![](images/3608745d85340830d3714b391c6401e08ae575b7c2858fdf864af7cb9b124f87.jpg) +Figure 10. MoE vs Dense: scaling training FLOPs. We compare MoE and dense early-fusion models when scaling both the amount of training tokens and model sizes. MoEs beat dense models when matching the number of active parameters. + +their modalities, similar to previous works [7, 75]. We train models with distinct image and text experts in the form of FFNs, where image tokens are processed only by the image FFN and text tokens only by the text FFN. Compared to modality-aware routing, MoEs exhibit significantly better performance on both image-caption and interleaved data as presented in Figure 11. + +# 4.4. Emergence of expert specialization and sharing + +We investigate multimodal specialization in MoE architectures. In Figure 13, we visualize the normalized number of text and image tokens assigned to each expert across layers. To quantify this specialization, we compute a specialization score, defined as the average, across all experts within a layer, of $1 - H(p)$ , where $H$ is the binary entropy of each expert's text/image token distribution. We plot this specialization score in Figure 12. Higher specialization scores indicate a tendency for experts to focus on either text or image tokens, while lower scores indicate a shared behavior. These visualizations provide clear evidence of modality-specific experts, particularly in the early layers. 
Furthermore, the specialization score decreases as the number of layers increases, before rising again in the last layers. This suggests that early and final layers exhibit higher modality specialization compared to mid-layers. This behavior is intuitive, as middle layers are expected to hold higher-level features that may generalize across modalities, and consistent with findings in [61] that shows increasing alignment between modalities across layers. The emergence of both expert specialization and cross-modality sharing in our modality-agnostic MoE, suggests it may be a preferable approach compared to modality-aware sparsity. All data displayed here is from an early-fusion MoE model with 1B active parameters trained for 300B tokens. + +
| | Accuracy | | | | | | CIDEr | |
| | AVG | VQAv2 | TextVQA | OKVQA | GQA | VizWiz | COCO | TextCaps |
| Late-fusion | 46.8 | 69.4 | 25.8 | 50.1 | 65.8 | 22.8 | 70.7 | 50.9 |
| Early-fusion | 47.6 | 69.3 | 28.1 | 52.1 | 65.4 | 23.2 | 72.0 | 53.8 |
| Early-MoEs | 48.2 | 69.8 | 30.0 | 52.1 | 65.4 | 23.6 | 69.6 | 55.7 |
+ +Table 4. Supervised finetuning on the LLaVA mixture. All models are native at 1.5B scale and pre-trained on 300B tokens. + +![](images/df21bd1e747a0e8383b76d796688d703c2e2f383b9eba46b54ccd05bbe718f73.jpg) + +![](images/6f61e1f3f53e3ddd36f12868abec144cf60c2677463e6e9ba5cb4b0e8e6d8985.jpg) + +![](images/c3de713136a78894e21369ded443a32da1ce3d11b7bf70df27077caf95a41978.jpg) +Figure 11. Modality-aware vs modality agnostic routing for sparse NMMs. We compare modality-agnostic routing with modality-aware routing when scaling both the amount of training tokens and model sizes. + +# 5. Evaluation on downstream tasks with SFT + +Following previous work on scaling laws, we primarily rely on validation losses. However, we generally find that this evaluation correlates well with performance on downstream tasks. To validate this, we conduct a multimodal instruction tuning stage (SFT) on the LLaVA mixture [43] and report accuracy and CIDEr scores across several VQA and captioning tasks. Table 4 confirms the ranking of different model configurations. Specifically, early fusion outperforms late fusion, and MoEs outperform dense models. However, since the models are relatively small (1.5B scale), trained from scratch, and fine-tuned on a small dataset, the overall scores are lower than the current state of the art. Further implementation details can be found in Appendix A. + +# 6. Related work + +Large multimodal models. A long-standing research goal has been to develop models capable of perceiving the world through multiple modalities, akin to human sensory experience. Recent progress in vision and language processing has shifted the research focus from smaller, task-specific models toward large, generalist models that can handle diverse inputs [29, 67]. Crucially, pre-trained vision and language backbones often require surprisingly little adaptation to enable effective cross-modal communication [32, 47, 62, 68, 69]. 
Simply integrating a vision encoder with either an encoder-decoder architecture [45, 48, 63, 72] + +![](images/4a1035b9a18a960ba7e2cd7aca5a49f2e04a75b6df481835ae8d4417804fcb2d.jpg) +Figure 12. MoE specialization score. Entropy-based image/text specialization score (as described in § 4.4) across layers for two data sources: HQITP and Obelics. HQITP has a more imbalanced image-to-text token distribution, resulting in generally higher specialization. Despite this difference, both data sources exhibit a similar trend: the specialization score decreases in the early layers before increasing again in the final layers. + +or a decoder-only LLM has yielded highly capable multimodal systems [1, 6, 9, 13, 16, 35, 43, 49, 64, 73, 78, 83]. This late-fusion approach, where modalities are processed separately before being combined, is now well-understood, with established best practices for training effective models [34, 41, 46, 81]. In contrast, early-fusion models [8, 18, 66], which combine modalities at an earlier stage, remain relatively unexplored, with only a limited number of publicly released models [8, 18]. Unlike [18, 66], our models utilize only a single linear layer and rely exclusively on a next-token prediction loss. Furthermore, we train our models from scratch on all modalities without image tokenization. + +Native Multimodal Models. We define native multimodal models as those trained from scratch on all modalities simultaneously [67] rather than adapting LLMs to accommodate additional modalities. Due to the high cost of training such models, they remain relatively underexplored, with most relying on late-fusion architectures [27, 79]. Some multimodal models trained from scratch [4, 66, 76] relax this constraint by utilizing pre-trained image tokenizers such as [20, 70] to convert images into discrete tokens, integrating them into the text vocabulary. 
This approach enables models to understand and generate text and images, facilitating a more seamless multimodal learning process. + +Scaling laws. Scaling law studies aim to predict how model performance scales with training compute. Early works [26, 31] found that LLM performance follows a power-law relationship with compute, enabling the compute-optimal estimation of the number of model parameters and training tokens at scale for a given budget. Similar research has extended these findings to sparse Mixture of Experts (MoE) models, considering factors such as sparsity, number of experts, and routing granularity [15, 33, 74]. Scaling laws have also been observed across various domains, including image models [23], video models [56], protein LLMs [14], and imitation learning [54]. However, few stud + +![](images/f3ebdfb256272f381112555508c27704cd3e60fe5c08b203fb55b9fc64a1a634.jpg) +Figure 13. MoE specialization frequency. Percentage of text and image tokens routed to each expert on interleaved data from Obelics. Experts are ordered for better visualization. The first layer shows the highest amount of unimodal experts. + +![](images/21e8fdc385d8e3156ac6fa8c201c6a66e47d61ac4cf92ccd51951f6d42c2f9ef.jpg) + +![](images/5f8f59f429c09150c5fcb8b66558a02dcabcfe38634d5ce492da13a607485231.jpg) + +ies have investigated scaling laws for multimodal models. Notably, Aghajanyan et al. [5] examined multimodal models that tokenize modalities into discrete tokens and include multimodal generation. In contrast, we focus on studying early-fusion models that take raw multimodal inputs and are trained on interleaved multimodal data. + +Mixture of experts (MoEs). MoEs [59] scale model capacity efficiently by sparsely activating parameters, enabling large models with reduced per-sample compute. While widely studied in LLMs [22, 30, 36, 37, 42, 65, 77, 84], MoEs remain underexplored in multimodal settings. 
Prior work has examined contrastive models [50], late-fusion LLMs [38, 40], and modality-specific experts [7, 12, 60]. We focus on analyzing MoEs in early-fusion multimodal models. + +# 7. Limitations + +Our study finds that scaling law coefficients are broadly consistent across training mixtures, though a broader exploration is needed to validate this observation. While validation loss scales predictably with compute, the extent to which this correlates with downstream performance remains unclear and warrants further investigation. The accuracy of scaling law predictions improves with higher FLOPs, but their extrapolation to extreme model sizes is still an open question (Appendix D for more details). + +# 8. Conclusion + +We explore various strategies for compute-optimal pretraining of native multimodal models. We found the NMMs follow similar scaling laws to those of LLMs. Contrary to common belief, we find no inherent advantage in adopting late-fusion architectures over early-fusion ones. While both architectures exhibit similar scaling properties, early-fusion models are more efficient to train and outperform late-fusion models at lower compute budgets. Furthermore, we show that sparse architectures encourage modality-specific specialization, leading to performance improvements while maintaining the same inference cost. + +# Acknowledgment + +We thank Philipp Dufter, Samira Abnar, Xiujun Li, Zhe Gan, Alexander Toshev, Yinfei Yang, Dan Busbridge, and Jason Ramapuram for many fruitful discussions. We thank Denise Hui, and Samy Bengio for infra and compute support. Finally, we thank, Louis Bethune, Pierre Ablin, Marco Cuturi, and the MLR team at Apple for their support throughout the project. + +# References + +[1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. 
arXiv preprint arXiv:2404.14219, 2024. 8 +[2] Samira Abnar, Harshay Shah, Dan Busbridge, Alaaeldin Mohamed Elnouby Ali, Josh Susskind, and Vimal Thilak. Parameters vs flops: Scaling laws for optimal sparsity for mixture-of-experts language models. arXiv preprint arXiv:2501.12370, 2025. 2, 3, 6, 18, 20 +[3] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1 +[4] Armen Aghajanyan, Bernie Huang, Candace Ross, Vladimir Karpukhin, Hu Xu, Naman Goyal, Dmytro Okhonko, Mandar Joshi, Gargi Ghosh, Mike Lewis, et al. Cm3: A causal masked multimodal model of the internet. arXiv preprint arXiv:2201.07520, 2022. 8 +[5] Armen Aghajanyan, Lili Yu, Alexis Conneau, Wei-Ning Hsu, Karen Hambardzumyan, Susan Zhang, Stephen Roller, Naman Goyal, Omer Levy, and Luke Zettlemoyer. Scaling laws for generative mixed-modal language models. In International Conference on Machine Learning, pages 265-279. PMLR, 2023. 3, 8 +[6] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. 1, 8 +[7] Hangbo Bao, Wenhui Wang, Li Dong, Qiang Liu, Owais Khan Mohammed, Kriti Aggarwal, Subhojit Som, and Furu Wei. Vlmo: Unified vision-language pretraining with mixture-of-modality-experts. arXiv preprint arXiv:2111.02358, 2021. 7, 8 +[8] Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, and Săgnak Taşirlar. Introducing our multimodal models, 2023. 8 +[9] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. 
Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024. 1, 8 + +[10] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3, 4 +[11] Minwoo Byeon, Beomhee Park, Haecheon Kim, Sungjun Lee, Woonhyuk Baek, and Saehoon Kim. Coyo-700m: Image-text pair dataset. https://github.com/kakaobrain/coyo-dataset, 2022.3, 13 +[12] Junyi Chen, Longteng Guo, Jia Sun, Shuai Shao, Zehuan Yuan, Liang Lin, and Dongyu Zhang. Eve: Efficient vision-language pre-training with masked prediction and modality-aware moe. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1110-1119, 2024. 8 +[13] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 8 +[14] Xingyi Cheng, Bo Chen, Pan Li, Jing Gong, Jie Tang, and Le Song. Training compute-optimal protein language models. bioRxiv, 2024. 8 +[15] Aidan Clark, Diego de Las Casas, Aurelia Guy, Arthur Mensch, Michela Paganini, Jordan Hoffmann, Bogdan Damoc, Blake Hechtman, Trevor Cai, Sebastian Borgeaud, et al. Unified scaling laws for routed language models. In International conference on machine learning, pages 4057-4086. PMLR, 2022. 8 +[16] Wenliang Dai, Nayeon Lee, Boxin Wang, Zhuolin Yang, Zihan Liu, Jon Barker, Tuomas Rintamaki, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. NvIm: Open frontier-class multimodal llms. 
arXiv preprint arXiv:2409.11402, 2024.8 +[17] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdul-mohsin, et al. Scaling vision transformers to 22 billion parameters. In International Conference on Machine Learning, pages 7480-7512. PMLR, 2023. 3 +[18] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. arXiv preprint arXiv:2406.11832, 2024.8 +[19] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 1 +[20] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12873-12883, 2021. 8 +[21] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. arXiv preprint arXiv:2309.17425, 2023. 3, 6, 13 + +[22] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research, 23(120):1-39, 2022. 8 +[23] Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Béthune, Zhe Gan, Alexander T Toshev, Marcin Eichner, Moin Nabi, Yinfei Yang, Joshua M. Susskind, and Alaaeldin El-Nouby. Multimodal autoregressive pre-training of large vision encoders, 2024. 1, 8 +[24] Trevor Gale, Deepak Narayanan, Cliff Young, and Matei Zaharia. Megablocks: Efficient sparse training with mixture-of-experts. Proceedings of Machine Learning and Systems, 5:288-304, 2023. 
6 +[25] Alexander Hagele, Elie Bakouch, Atli Kosson, Loubna Ben Allal, Leandro Von Werra, and Martin Jaggi. Scaling laws and compute-optimal training beyond fixed training durations. arXiv preprint arXiv:2405.18392, 2024. 3 +[26] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. In Proceedings of the 36th International Conference on Neural Information Processing Systems, pages 30016-30030, 2022. 2, 3, 4, 8, 17 +[27] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Barun Patra, et al. Language is not all you need: Aligning perception with language models. Advances in Neural Information Processing Systems, 36:72096-72109, 2023. 8 +[28] Peter J. Huber. Robust Estimation of a Location Parameter, pages 492-518. Springer New York, New York, NY, 1992. 3 +[29] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 7 +[30] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. 8 +[31] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 2, 8, 15 +[32] Jing Yu Koh, Ruslan Salakhutdinov, and Daniel Fried. Grounding language models to images for multimodal inputs and outputs. In International Conference on Machine Learning, pages 17283-17300. PMLR, 2023. 
7 +[33] Jakub Krajewski, Jan Ludziejewski, Kamil Adamczewski, Maciej Pioro, Michal Krutul, Szymon Antoniak, Kamil Ciebiera, Krystian Król, Tomasz Odrzygoźdź, Piotr Sankowski, et al. Scaling laws for fine-grained mixture of experts. arXiv preprint arXiv:2402.07871, 2024. 8, 18 + +[34] Hugo Laurencon, Lucile Saulnier, Léo Tronchon, Stas Bekman, Amanpreet Singh, Anton Lozhkov, Thomas Wang, Siddharth Karamcheti, Alexander Rush, Douwe Kiela, et al. Obelics: An open web-scale filtered dataset of interleaved image-text documents. Advances in Neural Information Processing Systems, 36, 2024. 3, 5, 8, 13 +[35] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models? arXiv preprint arXiv:2405.02246, 2024. 1, 8 +[36] Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan First, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. Gshard: Scaling giant models with conditional computation and automatic sharding. arXiv preprint arXiv:2006.16668, 2020. 8 +[37] Mike Lewis, Shruti Bhosale, Tim Dettmers, Naman Goyal, and Luke Zettlemoyer. Base layers: Simplifying training of large, sparse models. In International Conference on Machine Learning, pages 6265-6274. PMLR, 2021. 8 +[38] Dongxu Li, Yudong Liu, Haoning Wu, Yue Wang, Zhiqi Shen, Bowen Qu, Xinyao Niu, Guoyin Wang, Bei Chen, and Junnan Li. Aria: An open multimodal native mixture-of-experts model. arXiv preprint arXiv:2410.05993, 2024. 8 +[39] Jeffrey Li, Alex Fang, Georgios Smyrnis, Maor Ivgi, Matt Jordan, Samir Gadre, Hritik Bansal, Etash Guha, Sedrick Keh, Kushal Arora, et al. Datacomp-lm: In search of the next generation of training sets for language models. arXiv preprint arXiv:2406.11794, 2024. 3, 13, 15 +[40] Bin Lin, Zhenyu Tang, Yang Ye, Jiaxi Cui, Bin Zhu, Peng Jin, Junwu Zhang, Munan Ning, and Li Yuan. Moe-llava: Mixture of experts for large vision-language models. arXiv preprint arXiv:2401.15947, 2024. 
8 +[41] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. 5, 8 +[42] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 8 +[43] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 1, 7, 8 +[44] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 13 +[45] Jiasen Lu, Christopher Clark, Rowan Zellers, Roozbeh Mottaghi, and Aniruddha Kembhavi. Unified-io: A unified model for vision, language, and multi-modal tasks. In The Eleventh International Conference on Learning Representations, 2022. 7 +[46] Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Duffer, Dhruti Shah, Xianzhi Du, Futang Peng, Anton Belyi, et al. Mm1: methods, analysis and insights from multimodal llm pre-training. In European Conference on Computer Vision, pages 304–323. Springer, 2025. 5, 8, 13 + +[47] Jack Merullo, Louis Castricato, Carsten Eickhoff, and Ellie Pavlick. Linearly mapping from image to text space. In *The Eleventh International Conference on Learning Representations*, 2023. 7 +[48] David Mizrahi, Roman Bachmann, Oguzhan Kar, Teresa Yeo, Mingfei Gao, Afshin Dehghan, and Amir Zamir. 4m: Massively multimodal masked modeling. Advances in Neural Information Processing Systems, 36:58363-58408, 2023. 7 +[49] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Tushar Nagarajan, Matt Smith, Shashank Jain, Chun-Fu Yeh, Prakash Murugesan, Peyman Heidari, Yue Liu, et al. 
Anymal: An efficient and scalable any-modality augmented language model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1314-1332, 2024. 8 +[50] Basil Mustafa, Carlos Riquelme, Joan Puigcerver, Rodolphe Jenatton, and Neil Houlsby. Multimodal contrastive learning with limoe: the language-image mixture of experts. Advances in Neural Information Processing Systems, 35:9564-9576, 2022. 8 +[51] Jorge Nocedal. Updating quasi newton matrices with limited storage. Mathematics of Computation, 35(151):951-958, 1980. 3 +[52] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 1 +[53] Tim Pearce and Jinyeop Song. Reconciling kaplan and chinchilla scaling laws. arXiv preprint arXiv:2406.12907, 2024. 15 +[54] Tim Pearce, Tabish Rashid, Dave Bignell, Raluca Georgescu, Sam Devlin, and Katja Hofmann. Scaling laws for pre-training agents and world models. arXiv preprint arXiv:2411.04434, 2024. 8 +[55] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 1, 3, 15 +[56] Jathushan Rajasegaran, Ilija Radosavovic, Rahul Ravishankar, Yossi Gandelsman, Christoph Feichtenhofer, and Jitendra Malik. An empirical study of autoregressive pretraining from videos. arXiv preprint arXiv:2501.05453, 2025.8 +[57] Kanchana Ranasinghe, Brandon McKinzie, Sachin Ravi, Yinfei Yang, Alexander Toshev, and Jonathon Shlens. Perceptual grouping in contrastive vision-language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5571-5584, 2023. 
13 +[58] Noam Shazeer. Glu variants improve transformer. arXiv preprint arXiv:2002.05202, 2020. 3 +[59] Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture + +of-experts layer. arXiv preprint arXiv:1701.06538, 2017. 2, 6, 8 +[60] Sheng Shen, Zhewei Yao, Chunyuan Li, Trevor Darrell, Kurt Keutzer, and Yuxiong He. Scaling vision-language models with sparse mixture of experts. In The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. 8 +[61] Mustafa Shukor and Matthieu Cord. Implicit multimodal alignment: On the generalization of frozen llms to multimodal inputs. Advances in Neural Information Processing Systems, 37:130848-130886, 2024. 7 +[62] Mustafa Shukor, Corentin Dancette, and Matthieu Cord. eplalm: Efficient perceptual augmentation of language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22056-22069, 2023. 1, 7 +[63] Mustafa Shukor, Corentin Dancette, Alexandre Rame, and Matthieu Cord. Unival: Unified model for image, video, audio and language tasks. Transactions on Machine Learning Research Journal, 2023. 7 +[64] Mustafa Shukor, Dana Aubakirova, Francesco Capuano, Pepijn Kooijmans, Steven Palma, Adil Zoutine, Michel Ar-actingi, Caroline Pascal, Martino Russi, Andres Marafioti, et al. Smolvla: A vision-language-action model for affordable and efficient robotics. arXiv preprint arXiv:2506.01844, 2025. 1, 8 +[65] Xingwu Sun, Yanfeng Chen, Yiqing Huang, Ruobing Xie, Jiaqi Zhu, Kai Zhang, Shuaipeng Li, Zhen Yang, Jonny Han, Xiaobo Shu, et al. Hunyuan-large: An open-source moe model with 52 billion activated parameters by tencent. arXiv preprint arXiv:2411.02265, 2024. 8 +[66] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 
8 +[67] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 7, 8 +[68] Maria Tsimpoukelli, Jacob L Menick, Serkan Cabi, SM Eslami, Oriol Vinyals, and Felix Hill. Multimodal few-shot learning with frozen language models. Advances in Neural Information Processing Systems, 34:200-212, 2021. 7 +[69] Théophane Vallaeys, Mustafa Shukor, Matthieu Cord, and Jakob Verbeek. Improved baselines for data-efficient perceptual augmentation of llms. arXiv preprint arXiv:2403.13499, 2024. 7 +[70] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2017. 8 +[71] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. 3 +[72] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International conference on machine learning, pages 23318-23340. PMLR, 2022. 7 + +[73] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 1, 8 +[74] Siqi Wang, Zhengyu Chen, Bei Li, Keqing He, Min Zhang, and Jingang Wang. Scaling laws across model architectures: A comparative analysis of dense and MoE models in large language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 5583-5595, Miami, Florida, USA, 2024. Association for Computational Linguistics. 
8, 18 +[75] Wenhui Wang, Hangbo Bao, Li Dong, Johan Bjorck, Zhiliang Peng, Qiang Liu, Kriti Aggarwal, Owais Khan Mohammed, Saksham Singhal, Subhojit Som, et al. Image as a foreign language: Beit pretraining for all vision and vision-language tasks. arXiv preprint arXiv:2208.10442, 2022. 7 +[76] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 8 +[77] Tianwen Wei, Bo Zhu, Liang Zhao, Cheng Cheng, Biye Li, Weiwei Lu, Peng Cheng, Jianhao Zhang, Xiaoyu Zhang, Liang Zeng, et al. Skywork-moe: A deep dive into training techniques for mixture-of-experts language models. arXiv preprint arXiv:2406.06563, 2024.8 +[78] Le Xue, Manli Shu, Anas Awadalla, Jun Wang, An Yan, Senthil Purushwalkam, Honglu Zhou, Viraj Prabhu, Yutong Dai, Michael S Ryoo, et al. xgen-mm (blip-3): A family of open large multimodal models. arXiv preprint arXiv:2408.08872, 2024. 1, 8 +[79] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 8 +[80] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 1 +[81] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Duffer, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1. 5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 5, 8 +[82] Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023. 
3 +[83] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. In The Twelfth International Conference on Learning Representations, 2024. 8 +[84] Barret Zoph, Irwan Bello, Sameer Kumar, Nan Du, Yanping Huang, Jeff Dean, Noam Shazeer, and William Fedus. St-moe: Designing stable and transferable sparse expert models. arXiv preprint arXiv:2202.08906, 2022. 8 + +# Scaling Laws for Native Multimodal Models Supplementary Material + +This supplementary material is organized as follows: + +- Appendix A: contains the implementation details and the hyperparameters used to train our models. +- Appendix B: contains detailed comparison between early and late fusion models. +- Appendix C: contains more details about scaling laws derivation, evaluation and additional results. +- Appendix D: contains discussion about the paper limitations. +- Appendix E: contains more results about MoEs and modality specialization. + +# A. Experimental setup + +In Table 6, we show the pre-training hyperparameters for different model configurations used to derive the scaling laws. The number of parameters ranges from 275M to 3.7B, with model width increasing accordingly, while the depth remains fixed at 24 layers. Learning rates vary by model size, decreasing as the model scales up. Based on empirical experiments and estimates similar to [46], we found these values to be effective in our setup. Training is optimized using a fully decoupled AdamW optimizer with momentum values $\beta_{1} = 0.9$ , $\beta_{2} = 0.95$ , and a weight decay of $1\mathrm{e} - 4$ . The batch size is set to 2k samples, which account for 2M tokens, given 1k context length. Gradient clipping is set to 1.0, with a maximum warmup duration of 5k iterations, adjusted for shorter training runs: 1k and 2.5k warmup steps for models trained between 1k-4k and 5k-15k steps, respectively. 
For MoEs, we found that longer warmup is significantly better, so we adopt a 2.5k warmup for all runs under 20k steps. We use a constant learning rate schedule with cooldown during the final $20\%$ of training, gradually reducing to zero following an inverse square root schedule. For vision processing, image inputs are divided into (14, 14) patches, with augmentations including Random Resized Crop (resizing images to 224px with a scale range of [0.4, 1.0]) and Random Horizontal Flip with a probability of 0.5. We train our models on a mixture of interleaved, image-caption, and text-only data (Table 5). For late fusion models, we found that using a smaller learning rate for the vision encoder significantly boosts the performance (Table 8), and when both the encoder and decoder are initialized (Appendix B.7) we found that freezing the vision encoder works best (Table 7). 
Data typedataset#samplessampling prob.
DFN [21]2B27%
Image-CaptionCOYO [11]600M11.25%
HQITP[57]400M6.75%
InterleavedObelics [34]141M Docs45%
TextDCLM [39]6.6T Toks10%
+ +Table 5. Pre-training data mixture. Unless otherwise specified, the training mixture contains $45\%$ , $45\%$ and $10\%$ of image captions, interleaved documents and text-only data. + +
Early-fusion
Params275M468M932M1.63B2.28B3.35B
width80010881632220826243232
depth24
Learning rate1.5e-31.5e-35e-44.2e-44e-43.5e-4
Late-fusion
Params289M494M1B1.75B2.43B3.7B
vision encoder width384512768102411841536
vision encoder depth24
width76810241536204824643072
depth24
Learning rate1.5e-31.5e-35e-44.2e-43.8e-43.3e-4
Early-fusion MoEs
Active Params275M468M932M1.63B2.28B3.35B
width80010881632220826243232
depth24
Learning rate1.5e-31.5e-35e-44.2e-44e-43.5e-4
Training tokens2.5B-600B
OptimizerFully decoupled AdamW [44]
Optimizer Momentumβ1=0.9, β2=0.95
Minimum Learning rate0
Weight decay1e-4
Batch size2k
Patch size(14, 14)
Gradient clipping1.0
Maximum Warmup iterations5k
Augmentations: +RandomResizedCrop +size224px
scale[0.4, 1.0]
RandomHorizontalFlipp=0.5
+ +Table 6. Pre-training hyperparameters We detail the hyperparameters used for pre-training different model configurations to derive scaling laws. + +
Vision encoder +lr schedulerInterleaved +(CE)Image-Caption +(CE)Text +(CE)AVG +(CE)AVG (SFT) +(Acc)
12.5212.152.8672.51343.49
0.12.5022.0662.8622.47752.27
0.012.5022.0662.8592.47653.76
0.0012.5132.0662.8572.479-
0 (frozen)2.5042.0612.8562.47454.14
+ +Table 7. Vision encoder scalar. Freezing the vision encoder works best when initializing late-fusion models with pre-trained models. + +![](images/e1045b0eee72b7e00af10c12fc407f2a2b374da404a7ef29ecfb2de71a4c8ab8.jpg) + +![](images/48ddff1697fd13cb0e2777d4878ca97b8b1c84373b2fd14b4c89423c97881f5b.jpg) + +![](images/768487d1cb2c067b0f6e62f53328b62a8037bbd733461c754d3ae55f1b154cd2.jpg) + +![](images/1249bd8d2556f58d1f41ad80f0ef011fad2d11b1c4b68a40881aad72f6f4d8f5.jpg) +Figure 14. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the model size and the number of training tokens. The gap decreases mainly due to scaling models size. + +![](images/94e560b7e79f3313be19f00c8abc755758f179ac42e21d3c6cdac2d3d494893e.jpg) +Figure 15. Early vs late fusion: changing the training mixture. We vary the training mixtures and plot the final training loss. Early fusion models become better when increasing the proportion of interleaved documents. Early and late fusion has 1.63B and 1.75B parameters respectively. + +![](images/35b33667f30401245922f03ec33eabdee73856313eeb1047a34b45938190086e.jpg) + +![](images/81e7cde88d0f03bd3d1762a769bf80c9cf66016126c5cafb8103a4885b3eefb2.jpg) + +
Vision encoder lrScalerInterleaved (CE)Image-Caption (CE)Text (CE)AVG (CE)AVG (SFT) (Acc)
0.12.6742.2193.0722.65534.84
0.012.6722.1973.0712.64738.77
0.0012.6742.2183.0732.65538.46
+ +Table 8. Vision encoder scalar. Reducing the learning rate for the vision encoder is better when training late-fusion models from scratch. + +# B. Late vs early fusion + +This section provides additional comparison between early and late fusion models. + +# B.1. Scaling FLOPs + +Figure 14 compares early-fusion and late-fusion models when scaling FLOPs. Specifically, for each model size, we train multiple models using different amounts of training tokens. The performance gap between the two approaches mainly decreases due to increasing model sizes rather than increasing the number of training tokens. Despite the decreasing gap, across all the models that we train, early-fusion consistently outperform late-fusion. + +# B.2. Changing the training data mixture + +We analyze how the performance gap between early and late fusion models changes with variations in the training data mixture. As shown in Figure 16 and Figure 15, when fixing the model size, increasing the ratio of text and interleaved data favors early fusion. Interestingly, the gap remains largely unchanged for other data types. We also observe interference effects between different data types. Specifically, increasing the amount of interleaved data negatively impacts performance on image captions and vice versa. Additionally, increasing the proportion of text-only data slightly improves interleaved performance but increases loss on image captions. Overall, we find that text-only and interleaved data are correlated across different setups. + +# B.3. Scaling image resolution is in favor of early-fusion + +We examine how both architectures perform with varying image resolution. We fix the number of model parameters to 1.63B and 1.75B for early and late fusion respectively. All models are trained for 100K steps or 200B tokens. Since + +![](images/5d510b88dd42b8ddd6e4d6b000a38f3524b9b76ce933ceb6ce23cf3dbff52932.jpg) +Figure 16. 
Early vs late fusion: changing the amount of text-only data in the training mixture (isoFLOPs). We vary the ratio of text-only data and plot the final training loss. The gap increases with the text data ratio in favor of early fusion model. Early fusion has 1.63B parameters and late fusion 1.75B parameters. + +![](images/4940c0e3eb2f8c85622a677039121632355bd85038b5913c73fa6ec971513244.jpg) + +![](images/017cd4e44d6041fbf5d92d6449f51dc545d6432fffd337fa70b94d0dc159c6e0.jpg) + +![](images/3158294d4bed882c5795c15d51f30187e0425323247a897fc1f73162cbb2ca5e.jpg) +Figure 17. Early vs late fusion: training with different image resolutions (isoFLOPs). For the same training FLOPs we vary the image resolution (and thus the number of image tokens) during training and report the final training loss. Increasing resolution, hurts the performance on text and interleaved documents, while helping image captioning. The gap stays almost the same on text and interleaved data while slightly increase on image captioning in favor of early fusion. + +![](images/9be0f1f4378cc87d81924a109805bfd368af57358e8a69b7dd0cb84c5d2d293b.jpg) + +the patch size remains constant, increasing the resolution results in a higher number of visual tokens. For all resolutions, we maintain the same number of text tokens. As shown in Figure 17, the early-fusion model consistently outperforms the late-fusion model across resolutions, particularly for multimodal data, with the performance gap widening at higher resolutions. Additionally, we observe that the loss on text and interleaved data increases as resolution increases. + +# B.4. Early-fusion is consistently better when matching the late-fusion model size + +In this section, we compare the late-fusion model with different configurations of early-fusion one. Specifically, we train early-fusion models that match the late-fusion model in total parameters (Params), text model size (Text), and FLOPs (FLOPs), assuming 45-45-10 training mixture. 
As shown in Figure 18, early fusion consistently outperforms late fusion when normalized by total parameters, followed + +by normalization by FLOPs. When matching the text model size, early fusion performs better at higher ratios of interleaved data. + +# B.5. Different late-fusion configuration + +We examine how this scaling changes with different late-fusion configurations. Instead of scaling both the vision and text models equally, as done in the main paper, we fix the vision encoder size to 300M and scale only the text model. Figure 19 shows that late-fusion models lag behind at smaller model sizes, with the gap closing significantly as the text model scales. This suggests that allocating more parameters to shared components is more beneficial, further supporting the choice of early-fusion models. + +# B.6. Different context lengths + +In the paper, we use a 1k context length following [31]. Also following, this paper, we ignore the context length effect, as the model dimension dominates the training compute estimate. Moreover, [53] empirically found that scaling coefficients are robust to context length. Nevertheless, Our initial experiments (Figure 20) indicate that scaling the context length did not significantly affect the comparison between late and early fusion. + +# B.7. Initializing from LLM and CLIP + +We study the case where both late and early fusion models are initialized from pre-trained models, specifically DCLM-1B [39] and CLIP-ViT-L [55] for late fusion. Interestingly, Figure 21 shows that for text and interleaved multimodal documents, early fusion can match the performance of late fusion when trained for longer. However, closing the gap on image caption data remains more challenging. Notably, when considering the overall training cost, including that of pre-trained models, early fusion requires significantly longer training to compensate for the vision encoder's pretraining cost. 
+ +![](images/bef8ca3878711b6a138e83a9bfee1c56c489545a02ac647bb89e7d1bbb3a12e6.jpg) + +![](images/f92c5996032cc044a4a3f0ed2777722f2dbfee317e184cb6745575c5c7f4f5ec.jpg) + +![](images/8b99abece7cc6b38f661c87c0bc8d05b6d13d113129c2d0fb0fa3e7c2fcb5155.jpg) + +![](images/1f84b67b5d7cfa4cb69e5df7625489543ef35e1179c8f78db4afa37ad99ee036.jpg) +Figure 18. Early vs late fusion: changing the training mixture and early-fusion configuration. We vary the training mixtures and plot the final training loss for different configuration of early fusion models. For the same number of total parameters early fusion consistently outperform late fusion. + +![](images/b0edcfb8489751a58c88dfeb30b60fd61492d6bfaf25d9ea5df642aa01489397.jpg) + +![](images/de31258d30cda455adcab7e3e5a0b3e5d9d53d61358d4cde8f2e527f429a9010.jpg) + +![](images/4dde252ff0946067af23011ac2b4db6aa4011b6c34fbfbc6549057a599da712d.jpg) + +![](images/9dbd382a1fce940ad7eb236a4d73fcae11b11afb2586a2dd0f48ae744b045239.jpg) +Figure 19. Early vs late fusion: scaling training FLOPs while fixing the vision encoder size. We compare early and late fusion models when scaling both the amount of training tokens and model sizes. For late fusion mdoels, we fix the vision encoder size (300M) and scale the text model (250M, 834M, 2B, 3B). The gap between early and late get tighter when scaling the text model. + +![](images/10dbbf3ec8073a52df28e88684bc9882d3a1ce36b6a9552bc7fb858ad7c9543e.jpg) +Figure 20. Early vs late fusion with different context lengths. + +![](images/af197f1feb50d444ba04fef9c57e806bc731f5d9db518edfb3c47371741f6600.jpg) +Figure 21. Early vs late fusion when initializing the encoder and decoder. Early-fusion can match the performance of late-fusion models when trained for longer. However, the gap is bigger on image-caption data. + +# C. Scaling laws + +# C.1. 
Fitting $L = F(N,D)$ + +Following [26], we determine the parameters that minimize the following objective across all our runs $i$ : + +$$ +\min _ {a, b, e, \alpha , \beta} \sum_ {i} \operatorname {H u b e r} _ {\delta} \left(\operatorname {L S E} \left(a - \alpha \log N _ {i}, b - \beta \log D _ {i}, e\right) - \log L _ {i}\right), \tag {2} +$$ + +We perform this optimization across various initialization ranges and select the parameters that achieve the lowest loss across all initializations. Specifically, our grid search spans $\{0, 0.5, 2.5\}$ for $\alpha$ and $\beta$ , $\{0, 5, 10, \dots, 30\}$ for $a$ and $b$ , and $\{-1, -0.5, 1, 0.5\}$ for $e$ . We use the L-BFGS algorithm with $\delta = 1e - 3$ . + +# C.2. Fitting $N \propto C^{a}, D \propto C^{b}, D \propto N^{d}$ + +While these equations have a closed-form solution [26] for early-fusion models that can be derived from Eq 1, this is not the case for late-fusion models without specifying either the vision encoder or text model size. To ensure a fair comparison, we derive these equations for both models, by performing linear regression in log space. We found that the regression is very close to the coefficient found with closed-form derivation Table 9. For instance, to derive $N = K_{a}C^{a}$ , given a FLOP budget $C$ and a set of linearly spaced tokens $D_{i}$ ranging from 10B to 600B, we compute the model size for each $D_{i}$ as $N_{i} = \frac{C}{6D}$ for early fusion and $N_{i} = \frac{C}{6D} + 0.483 * N_{v}$ for late fusion (for the 45-45-10 mixture, $D_{v} = 0.544D$ , thus $C = 6D(0.544N_{v} + N_{t})$ ). We then apply Eq 1 to obtain the loss for each model size and select $N$ that has the minimum loss. We repeat this for all FLOP values corresponding to our runs, resulting in a set of points $(C, N_{opt})$ that we use to regress $a$ and $K_{a}$ . We follow a similar procedure to find $b$ and $d$ . For late-fusion models, we regress a linear model to determine $N_{v}$ given $N$ . 
Notably, even though we maintain a fixed width ratio for late-fusion models, this approach is more accurate, as embedding layers prevent a strictly fixed ratio between text and vision model sizes. We present the regression results in Figure 22. + +
Modelabdndn
Closed form0.526490.473510.899381.11188-0.05298
Regression0.523910.475340.900521.10224-0.04933
+ +Table 9. Scaling laws parameters for early-fusion. Doing regression to derive the scaling laws coefficients leads to very close results to using the closed-form solution. + +# C.3. Fitting $L \propto C^c$ + +To determine the relationship between the final model loss and the compute budget $C$ , we begin by interpolating the points corresponding to the same model size and compute + +the convex hull that covers the minimum loss achieved by all runs for each FLOP. This results in a continuous mapping from the FLOPs to the lowest loss. We consider a range of FLOPs, excluding very small values $(\leq 3e^{19})$ , and construct a dataset of $(C,L)$ for linearly spaced compute $C$ . Using this data, we find the linear relationship between $L$ and $C$ in the log space and deduce the exponent $c$ . We visualize the results in Figure 26. + +![](images/163a2ab479abcc17e9353749f796560b4c9b3868903e73a7bd4261351ac385ac.jpg) +C + +![](images/946d07662bb75c3af1e430bcb2e71945e0fc9b93f3c8ea8c090478e36a9ee523.jpg) +C + +![](images/8357b1f7cf079f4433ace33ae357a8cce5c5bbe3741aa00985d510c614ec3825.jpg) + +![](images/45331be5dffd8bf1906423e01879dee5d3a816ec501329e152690ebd1e24e59f.jpg) + +![](images/130ab8d89bfbb0daec936aeeb6ae15603aaee2a3b5a736c530f7a7e8d6b259fe.jpg) +C +C + +![](images/8eeca22c13a880c372f86b1bd6d50e828536005a351d735514d7e9130695399e.jpg) +C +C +Figure 22. Regression results of the scaling laws coefficients. our estimation of the scaling coefficients is close to the closed form solution. + +# C.4. Scaling laws for different target data type + +In Figure 27, we derive the scaling laws for different target data types. In general, we observe that the model learns image captioning faster than interleaved data, as indicated by the higher absolute value of the scaling exponent (e.g., 0.062 vs 0.046), despite using the same data ratio for captioning and interleaved data (45% each). 
Additionally, we find that the model learns more slowly on text-only data, likely due to the smaller amount of text-only data (10%). Across model configurations, we find that early fusion scales similarly to late fusion on image captioning but has a lower multiplicative constant (49.99 vs 47.97). For MoEs, the model learns faster but exhibits a higher multiplicative constant. On text and interleaved data, early and late fusion models scale similarly and achieve comparable + +![](images/05809573792c53ca70b02f70c994411ae668a3b5b2344a38b3d284b30400e43d.jpg) +Figure 23. Observed vs predicted loss. We visualize the loss predicted by our scaling laws (Eq 1) and the actual loss achieved by each run. + +![](images/eae4c7c1f75345ab401e7eb770895d809db3271eff8cce1157ab801bf3947c98.jpg) + +performance. However, MoEs demonstrate better overall performance while learning slightly more slowly. + +# C.5. Scaling laws for different training mixtures + +We investigate how the scaling laws change when modifying the training mixtures. Specifically, we vary the ratio of image caption, interleaved, and text-only data and report the results in Figure 28. Overall, we observe similar scaling trends, with only minor changes in the scaling coefficients. Upon closer analysis, we find that increasing the ratio of a particular data type in the training mixture, leads to a corresponding increase in its scaling exponent. For instance, increasing the ratio of image captions from $30\%$ to $40\%$ raises the absolute value of the exponent from 0.056 to 0.061. However, for text-only data, we do not observe significant changes in the scaling coefficients when varying its proportion in the training mixture. + +
ParameterMSER2MAE (%)
Held-in0.00290.98070.8608
Held-out0.00040.96820.5530
+ +Table 10. Scaling laws prediction errors. We report the mean square error, R2 and mean absolute error for the loss prediction for held-in and held-out (8B model) data. + +
ModelEαβabd
Avg1.809220.298420.332090.543020.483010.92375
Std0.338110.101010.028920.088130.057870.23296
+ +Table 11. Scaling laws sensitivity. We report the mean and standard deviation after bootstrapping with 100 iterations. + +# C.6. Scaling laws evaluation + +For each model size and number of training tokens, we compute the loss using the estimated functional form in Eq 1 and compare it to the actual loss observed in our runs. Figure 23, Figure 24, and Table 10 visualizes these comparisons, showing that our estimation is highly accurate, particularly for lower loss values and larger FLOPs. We also assess our scaling laws in an extrapolation setting, predicting performance beyond the model sizes used for fitting. Notably, our approach estimates the performance of an 8B model with reasonable accuracy. + +Additionally, we conduct a sensitivity analysis using bootstrapping. Specifically, we sample $P$ points with replacement ( $P$ being the total number of trained models) and re-estimate the scaling law coefficients. This process is repeated 100 times, and we report the mean and standard deviation of each coefficient. Table 11 shows that our estimation is more precise for $\beta$ than for $\alpha$ , primarily due to the smaller number of model sizes relative to the number of different token counts used to derive the scaling laws. + +# C.7. Scaling laws for sparse NMMs. + +Similar to dense models, we fit a parametric loss function (Eq 1) to predict the loss of sparse NMMs based on the number of parameters and training tokens, replacing the total parameter count with the number of active parameters. While incorporating sparsity is standard when deriving scaling laws for MoEs [2, 33, 74], we focus on deriving scaling laws specific to the sparsity level used in our MoE setup. This yields coefficients that are implicitly conditioned on the sparsity configuration. + +We also experiment with a sparsity-aware formulation of the scaling law as proposed in [2], and observe consistent + +![](images/a40ce810c6407918e90c23eeda423059a15b50810c13f19580f13fa38ea33ab6.jpg) +Figure 24. 
Observed vs predicted loss. We visualize the loss predicted by our scaling laws Eq 1 and the actual loss achieved by each run. We can reliably predict the performance of models larger (8B params) than those used to fit the scaling laws. + +trends (Table 12). In particular, the exponents associated with model size $(N)$ are substantially larger than those for training tokens $(\beta)$ , reinforcing the importance of scaling model size in sparse architectures. Additionally, we observe that the terms governing the scaling of active parameters decompose into two components. + +# D. Discussion and Limitations + +Scaling laws for multimodal data mixtures. Our scaling laws study spans different model configurations and training mixtures. While results suggest that the scaling law coefficients remain largely consistent across mixtures, a broader exploration of mixture variations is needed to validate this observation and establish a unified scaling law that accounts for this factor. + +Scaling laws and performance on downstream tasks. Similar to previous scaling law studies, our analysis focuses on pretraining performance as measured by the validation loss. However, the extent to which these findings translate to downstream performance remains an open question and requires further investigation. + +Extrapolation to larger scales. The accuracy of scaling law predictions improves with increasing FLOPs Appendix C. Furthermore, we validate our laws when extrapolating to larger model sizes (Appendix C.6). However, whether these laws can be reliably extrapolated to extremely large model sizes remains an open question. + +High resolution and early-fusion models. Training early-fusion models with high-resolution inputs leads to a significant increase in vision tokens. While pooling techniques have been widely adopted for late-fusion models, alternative approaches may be necessary for early fusion. 
Given the similarity of early-fusion models to LLMs, it appears + +that techniques for extending context length could be beneficial. + +Scaling laws for multimodal MoEs models. For MoEs, we consider only a single configuration (top-1 routing with 8 experts). We found this configuration to work reasonably well in our setup, and follow a standard MoEs implementation. However, the findings may vary when optimizing more the MoE architecture or exploring different load-balancing, routing strategies or different experts implementations. + +# E. Mixture of experts and modality-specific specialization + +# E.1. MoEs configuration + +We experiment with different MoEs configuration by changing the number of experts and the top-k. We report a sample of these experiments in Table 13. + +# E.2. MoEs specialization + +![](images/0457adae4a061cd434da8b673b2881180bd66aaec1951b226848f1504508a81e.jpg) +Figure 25. Modality-specific specialization. We visualize the experts specialization to text and image modalities. Models are evaluated on Obelics. + +We investigate multimodal specialization in MoE architectures. We compute a specialization score as the average difference between the number of text/images tokens assigned to each expert and a uniform assignment $(1 / E)$ . Additionally, we visualize the normalized number of text and image tokens assigned to each expert across layers. Figure 25 shows clear modality-specific experts, particularly in the early layers. Furthermore, the specialization score decreases as the number of layers increases but rises again in the very last layers. This suggests that early and final layers require more modality specialization compared to mid-layers. Additionally, we observe several experts shared between text and image modalities, a phenomenon not present in hard-routed or predefined modality-specific experts. + +
$L(N,D) = E + \frac{A}{N^{\alpha}} + \frac{B}{D^{\beta}}$ vs. $L(N,D,S) = \frac{A}{N^{\alpha}} + \frac{B}{D^{\beta}} + C(1-S)^{\lambda} + d(1-S)^{\delta}N^{\gamma}$
Model | E | A | B | α | β | λ | δ | γ | C | d
L(N,D) (Eq 1)2.15838177346590.7100.372-----
L(N,D,S) [2]1.0788146600.58900.37200.20.20.709561.0788381475
+ +Table 12. Scaling laws for sparse native multimodal models. + +
 | Accuracy | CIDEr
 | AVG | VQAv2 | TextVQA | OKVQA | GQA | VizWiz | COCO | TextCaps
4-E-top-1 | 40.0552 | 64.068 | 14.284 | 41.948 | 61.46 | 18.516 | 62.201 | 34.08
8-E-top-1 | 41.6934 | 65.684 | 17.55 | 42.908 | 63.26 | 19.065 | 67.877 | 39.63
8-E-top-2 | 42.8546 | 66.466 | 19.162 | 45.344 | 63.94 | 19.361 | 65.988 | 41.649
8-E-top-2 finegrained | 39.904 | 62.76 | 15.58 | 41.88 | 61.6 | 17.7 | 57.52 | 35.42
+ +Table 13. SFT results with different MoEs configurations. + +![](images/925871664600fa369934dd20a2f34c9ad334d39d98206e080eb90db5737c852f.jpg) +Figure 26. Scaling laws for native multimodal models. From left to right: late-fusion (dense), early-fusion (dense) and early-fusion MoEs. The scaling exponents are very close for all models. However, MoEs leads to overall lower loss (smaller multiplicative constant) and takes longer to saturate. + +![](images/a5eae89bad3ce176fd8a9fbfe3fb8e612accba59e9227da594fe7f8363a3bd57.jpg) + +![](images/92ad024f678fc1ec2ce39f984eafbe6ca1eaeb8ab89672d5e361993c173ed68b.jpg) + +![](images/7445330c28293501507c7a1de3b802c291b7e7d4af6025b3396fca03b97b7816.jpg) + +![](images/fb070c10fca1f4655ac4f3b29724b2851fad05ebde2ebfed26e2f7526c0576ed.jpg) + +![](images/2298a5bd5a365cb44cab1b103fb156142e541e4de0c91847da088ebeb6d5772b.jpg) + +![](images/170aa90031c86716a60fdb8bca8d1f9c6abc83012c624d6aa092ecd2d1457844.jpg) + +![](images/7229c0205223fd2165a40f38f790f64f89c1c7601693a992b739a3d796f80bf7.jpg) +Figure 27. Scaling laws for native multimodal models. From top to bottom: late-fusion (dense), early-fusion (dense) and early-fusion MoEs. From left to right: cross-entropy on the validation set of image-caption, interleaved and text-only data. + +![](images/87a7df1c2a6cde40a771564ee46e3757cc1a708b33a759bba95feacc6350135a.jpg) + +![](images/b7303f5247e7b6e588dfa05e005f69657763ba42c509db0cf3096a7e8f1b8e8d.jpg) + +
0.289B | 0.494B | 1B | 1.748B | 2.430B | 3.714B
0.275B | 0.464B | 0.932B | 1.627B | 2.280B | 3.354B
0.275B | 0.464B | 0.932B | 1.627B | 2.280B | 3.354B
+ +![](images/091e908750bec32dc612b24b77318d4392a00579c8de5564bd1f4aaeac00cffe.jpg) + +![](images/f1d1d1a9ae09d693813d534ed3e52995b2b53c52b57e1dea64039a244f0d113b.jpg) +45-45-10 +40-20-40 + +![](images/58b8a90492785ea6eb48f59e69bdee2ab2ead0444b721b6d7b2d5a5ac625d566.jpg) + +![](images/cb4ae73517140700ebf19aea43a4b90376fd6e9529aca7bd298171d3300e1774.jpg) + +![](images/ea60f82ab55e1707f5708b66a23159e1bf846ea0bd5d17664d67a614b59b36da.jpg) +30-30-40 + +![](images/fdb45bc5fa88e9e4889729e2053cff1cee23f3c95a045183987d941d85b99456.jpg) + +![](images/d1f9ebe2963b7671652b537ba3995aa73ec10a81a9fce26f78cb63d714e35caf.jpg) + +![](images/3ae00f36a4a66bc014814831c85b16c3b2c789a86ea8281e5e74facae8c1ca14.jpg) +20-40-40 + +![](images/af0628a1ce2945b362e0e67e09294f4be2d6a8597dc24e17cd4ce6c346c9a960.jpg) + +![](images/80f27c4c3ad4d2f64ee10c2d8bb2923fb6dd8bdd2496f979ee4083e210d57191.jpg) + +![](images/97f7d3f77f2b4f693354d3f8412b5275b071e1b828e08f12a883241d15b4b73f.jpg) + +![](images/11479b86f8758143bec163b9b3f731a9a13ce7f8d8af1f77885578d6fc23d4d9.jpg) + +![](images/20f8b6dd661ab17387d6361040a2dda735af8ad5b613ab0bc7dd00e0c1dfca59.jpg) +Figure 28. Scaling laws for early-fusion native multimodal models. Our runs across different training mixtures (Image-caption-Interleaved-Text) and FLOPs. We visualize the final validation loss on 3 data types: HQITP (left), Obelics (middle) and DCLM (right). 
\ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07951/images/017cd4e44d6041fbf5d92d6449f51dc545d6432fffd337fa70b94d0dc159c6e0.jpg b/data/2025/2504_07xxx/2504.07951/images/017cd4e44d6041fbf5d92d6449f51dc545d6432fffd337fa70b94d0dc159c6e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b31c984259354d6eae7fce41771eaa39e46a062 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/017cd4e44d6041fbf5d92d6449f51dc545d6432fffd337fa70b94d0dc159c6e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c299be90f4d08a9de210a762e0aa6723adb0bdf5c20e440443285a2a8dc7a513 +size 15617 diff --git a/data/2025/2504_07xxx/2504.07951/images/045203ceb8b4e4655a60480c74ed6b69e687bea09891aca71efb66fa919250c1.jpg b/data/2025/2504_07xxx/2504.07951/images/045203ceb8b4e4655a60480c74ed6b69e687bea09891aca71efb66fa919250c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee77dbacdd1b3125df054f7bca5c35d40fe3ed42 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/045203ceb8b4e4655a60480c74ed6b69e687bea09891aca71efb66fa919250c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e59c46793a8db13f80324dba5a172f9366419d0a66d70f875bb5fa6dbd7fc00c +size 30724 diff --git a/data/2025/2504_07xxx/2504.07951/images/0457adae4a061cd434da8b673b2881180bd66aaec1951b226848f1504508a81e.jpg b/data/2025/2504_07xxx/2504.07951/images/0457adae4a061cd434da8b673b2881180bd66aaec1951b226848f1504508a81e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32f13d84174d77225b87d111bd63f1856ea7572e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/0457adae4a061cd434da8b673b2881180bd66aaec1951b226848f1504508a81e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e3587a9a919b215d401866418a33aa0f0f531d7246cab393e6fd6489d92b65b +size 23904 diff --git 
a/data/2025/2504_07xxx/2504.07951/images/05809573792c53ca70b02f70c994411ae668a3b5b2344a38b3d284b30400e43d.jpg b/data/2025/2504_07xxx/2504.07951/images/05809573792c53ca70b02f70c994411ae668a3b5b2344a38b3d284b30400e43d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b1addd32ea38871f3f8638a77b403630de6993e5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/05809573792c53ca70b02f70c994411ae668a3b5b2344a38b3d284b30400e43d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e009010ee4e055a0c2065e0255dbb23628bde4430c65dba21a390035a2a0130 +size 32624 diff --git a/data/2025/2504_07xxx/2504.07951/images/091e908750bec32dc612b24b77318d4392a00579c8de5564bd1f4aaeac00cffe.jpg b/data/2025/2504_07xxx/2504.07951/images/091e908750bec32dc612b24b77318d4392a00579c8de5564bd1f4aaeac00cffe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b5b8b9d5c26f2065ce5335e59e816c9c624f24c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/091e908750bec32dc612b24b77318d4392a00579c8de5564bd1f4aaeac00cffe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43fd38dc1e656d0523ac970e360a36721f76885b8c134835e9ef3d1295c945dd +size 20393 diff --git a/data/2025/2504_07xxx/2504.07951/images/10dbbf3ec8073a52df28e88684bc9882d3a1ce36b6a9552bc7fb858ad7c9543e.jpg b/data/2025/2504_07xxx/2504.07951/images/10dbbf3ec8073a52df28e88684bc9882d3a1ce36b6a9552bc7fb858ad7c9543e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b15e47f93a47504416cab0c7fd3b7f47d5bd4fdb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/10dbbf3ec8073a52df28e88684bc9882d3a1ce36b6a9552bc7fb858ad7c9543e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57dafef267170d559349ef6c25c23b5f6d61e4296a4ec0a1228432360b1401f8 +size 25241 diff --git a/data/2025/2504_07xxx/2504.07951/images/11479b86f8758143bec163b9b3f731a9a13ce7f8d8af1f77885578d6fc23d4d9.jpg 
b/data/2025/2504_07xxx/2504.07951/images/11479b86f8758143bec163b9b3f731a9a13ce7f8d8af1f77885578d6fc23d4d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..648c7eb067a66f780cdcae41b572559e5ce2dc43 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/11479b86f8758143bec163b9b3f731a9a13ce7f8d8af1f77885578d6fc23d4d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13ecb42111eed39a7e22ce87e2a3d62ac2678cbbd18caf691c39ea2d385ee7ca +size 19462 diff --git a/data/2025/2504_07xxx/2504.07951/images/1249bd8d2556f58d1f41ad80f0ef011fad2d11b1c4b68a40881aad72f6f4d8f5.jpg b/data/2025/2504_07xxx/2504.07951/images/1249bd8d2556f58d1f41ad80f0ef011fad2d11b1c4b68a40881aad72f6f4d8f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0309b9c832b9c403cb5467dac58fdec09445b4b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/1249bd8d2556f58d1f41ad80f0ef011fad2d11b1c4b68a40881aad72f6f4d8f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20299e4ff31ef9beb8023b5f2f1987ade5176fcf7687b78f1e88d47adacd0bdc +size 10265 diff --git a/data/2025/2504_07xxx/2504.07951/images/12a7acbd253e8fc8060bb23066911da65f40d736f2b8fbbf41ef5b64ea350b44.jpg b/data/2025/2504_07xxx/2504.07951/images/12a7acbd253e8fc8060bb23066911da65f40d736f2b8fbbf41ef5b64ea350b44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a02a0dfd9f7acc6ddf41ac30b6612c987f8f190 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/12a7acbd253e8fc8060bb23066911da65f40d736f2b8fbbf41ef5b64ea350b44.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25f3c6df6865ff16443300c50bcc48386e74c378cb9759f54fc96c5b4b1cba7e +size 7930 diff --git a/data/2025/2504_07xxx/2504.07951/images/130ab8d89bfbb0daec936aeeb6ae15603aaee2a3b5a736c530f7a7e8d6b259fe.jpg b/data/2025/2504_07xxx/2504.07951/images/130ab8d89bfbb0daec936aeeb6ae15603aaee2a3b5a736c530f7a7e8d6b259fe.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..46bf96b2bcf79304b20ee5671261b8589f362b55 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/130ab8d89bfbb0daec936aeeb6ae15603aaee2a3b5a736c530f7a7e8d6b259fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa7e5b97cf4989dc983afb9ef466c50db9fdbf30d679141c508ec4e62ac12117 +size 10065 diff --git a/data/2025/2504_07xxx/2504.07951/images/151c20de476ef01f160a8055c4f9468eef0286edcc9f11c199662da996e79625.jpg b/data/2025/2504_07xxx/2504.07951/images/151c20de476ef01f160a8055c4f9468eef0286edcc9f11c199662da996e79625.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7cee2432dce6de28cf9f57c15c3a44b4e384682d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/151c20de476ef01f160a8055c4f9468eef0286edcc9f11c199662da996e79625.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:720fb9e8f3f79b40801bf394e74776eef85e057fc8a82e6dba462fda812940f7 +size 18265 diff --git a/data/2025/2504_07xxx/2504.07951/images/163a2ab479abcc17e9353749f796560b4c9b3868903e73a7bd4261351ac385ac.jpg b/data/2025/2504_07xxx/2504.07951/images/163a2ab479abcc17e9353749f796560b4c9b3868903e73a7bd4261351ac385ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6015f8bcd5ebcb697246e14f82ad6d8ecd5ac06d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/163a2ab479abcc17e9353749f796560b4c9b3868903e73a7bd4261351ac385ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2778d89edb5349a005d8af9cd8ca186a78feac5e04ec558e3a0ef3b1d5eba7f +size 8136 diff --git a/data/2025/2504_07xxx/2504.07951/images/170aa90031c86716a60fdb8bca8d1f9c6abc83012c624d6aa092ecd2d1457844.jpg b/data/2025/2504_07xxx/2504.07951/images/170aa90031c86716a60fdb8bca8d1f9c6abc83012c624d6aa092ecd2d1457844.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75f407741cb96864d1cc7a1d0d944b69f4315016 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07951/images/170aa90031c86716a60fdb8bca8d1f9c6abc83012c624d6aa092ecd2d1457844.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53864266b05762bec23855b03935fdcfd5b35170b34965cff8f8c438568bbd02 +size 18524 diff --git a/data/2025/2504_07xxx/2504.07951/images/19bb36aad451a1cd656099f17e52e909bfb31aefe24539cfdfbdde715fde3f60.jpg b/data/2025/2504_07xxx/2504.07951/images/19bb36aad451a1cd656099f17e52e909bfb31aefe24539cfdfbdde715fde3f60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0156c2cd942f449f05efb6c996a16b603b19b86a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/19bb36aad451a1cd656099f17e52e909bfb31aefe24539cfdfbdde715fde3f60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee4845340080f9640f5a78d7dd8f6184a83146f6232b5220c40630e91a2c0057 +size 21762 diff --git a/data/2025/2504_07xxx/2504.07951/images/1f84b67b5d7cfa4cb69e5df7625489543ef35e1179c8f78db4afa37ad99ee036.jpg b/data/2025/2504_07xxx/2504.07951/images/1f84b67b5d7cfa4cb69e5df7625489543ef35e1179c8f78db4afa37ad99ee036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4329a112267a65d79e12b04c21973771ef2bb3df --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/1f84b67b5d7cfa4cb69e5df7625489543ef35e1179c8f78db4afa37ad99ee036.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e141cddf77f03676d482402e4caa7d819cab22b470a6646cf0238ce7268f9fa +size 8542 diff --git a/data/2025/2504_07xxx/2504.07951/images/20c4e076762da2e42468513928ebe646d8ecfb3fe53aae03d0f904e8b15de96b.jpg b/data/2025/2504_07xxx/2504.07951/images/20c4e076762da2e42468513928ebe646d8ecfb3fe53aae03d0f904e8b15de96b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa9ddbaecefbb78591e8fb568e30fb50fc004775 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/20c4e076762da2e42468513928ebe646d8ecfb3fe53aae03d0f904e8b15de96b.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ca85a7b6f288f15890f21fa9a222ebb1e853f305df2e3e747df5e5da2d205c49 +size 15141 diff --git a/data/2025/2504_07xxx/2504.07951/images/20f8b6dd661ab17387d6361040a2dda735af8ad5b613ab0bc7dd00e0c1dfca59.jpg b/data/2025/2504_07xxx/2504.07951/images/20f8b6dd661ab17387d6361040a2dda735af8ad5b613ab0bc7dd00e0c1dfca59.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e64a102da68b5bca07622156cbdd40169a7157b6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/20f8b6dd661ab17387d6361040a2dda735af8ad5b613ab0bc7dd00e0c1dfca59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57ba7cd72485efe6551b5f4d2c51fd4d13395a078a13ce9588216a18bec1eb3c +size 9343 diff --git a/data/2025/2504_07xxx/2504.07951/images/21e8fdc385d8e3156ac6fa8c201c6a66e47d61ac4cf92ccd51951f6d42c2f9ef.jpg b/data/2025/2504_07xxx/2504.07951/images/21e8fdc385d8e3156ac6fa8c201c6a66e47d61ac4cf92ccd51951f6d42c2f9ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..080db7b72a14c22f68524977a84296dadf1c027e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/21e8fdc385d8e3156ac6fa8c201c6a66e47d61ac4cf92ccd51951f6d42c2f9ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddb95b66ec672b8cf0afc45ced9c05eff3bfe484dde7941e43a3c684599593a3 +size 9555 diff --git a/data/2025/2504_07xxx/2504.07951/images/2298a5bd5a365cb44cab1b103fb156142e541e4de0c91847da088ebeb6d5772b.jpg b/data/2025/2504_07xxx/2504.07951/images/2298a5bd5a365cb44cab1b103fb156142e541e4de0c91847da088ebeb6d5772b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ecfa25e92927b79e7e3d5db500edab991ea4c07a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/2298a5bd5a365cb44cab1b103fb156142e541e4de0c91847da088ebeb6d5772b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c22ea3c3c957fbadc689a3d6a4db0510210a4894141d69f1a6ff47fe545046b +size 19677 diff --git 
a/data/2025/2504_07xxx/2504.07951/images/29d9735919725580929983c6cf0f1e57af47d8b28095af285fee7f7e08e14bfc.jpg b/data/2025/2504_07xxx/2504.07951/images/29d9735919725580929983c6cf0f1e57af47d8b28095af285fee7f7e08e14bfc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e1a43d974665460faa214f60d9bbc81af6479a6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/29d9735919725580929983c6cf0f1e57af47d8b28095af285fee7f7e08e14bfc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:001f6cad6611bdebdd14bfa351403787680ec01cc5dae2178aef7f6abf36d6ec +size 18308 diff --git a/data/2025/2504_07xxx/2504.07951/images/2c3c0faf8744d893068c96328767e8b3cb6814d50ea95a957ae80e790bdaa99e.jpg b/data/2025/2504_07xxx/2504.07951/images/2c3c0faf8744d893068c96328767e8b3cb6814d50ea95a957ae80e790bdaa99e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d7fe01115dae78a95fb79a8d5842cec596c4716 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/2c3c0faf8744d893068c96328767e8b3cb6814d50ea95a957ae80e790bdaa99e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9730c4156b3f954351ec4c8c70e38ecea6a4708e483747297c048cdb7e50fdf0 +size 68440 diff --git a/data/2025/2504_07xxx/2504.07951/images/30f0b77b47bad4996304ae3eb682ca659d3838eb5fdb97502507841b6fa9a450.jpg b/data/2025/2504_07xxx/2504.07951/images/30f0b77b47bad4996304ae3eb682ca659d3838eb5fdb97502507841b6fa9a450.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d84746e7acd4da9b754edddba5587482d0449ed --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/30f0b77b47bad4996304ae3eb682ca659d3838eb5fdb97502507841b6fa9a450.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cd5b165e0775e023e82abfbe2a2e6fb9a16c6b15284da35157904489c6bcdd9 +size 13348 diff --git a/data/2025/2504_07xxx/2504.07951/images/3158294d4bed882c5795c15d51f30187e0425323247a897fc1f73162cbb2ca5e.jpg 
b/data/2025/2504_07xxx/2504.07951/images/3158294d4bed882c5795c15d51f30187e0425323247a897fc1f73162cbb2ca5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46cb987124dfbec1904e5152a3443be994b7819a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/3158294d4bed882c5795c15d51f30187e0425323247a897fc1f73162cbb2ca5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff22de7ee42b98619b851be725cb0f33876754d8e6c2f54561f454922d4d183b +size 18306 diff --git a/data/2025/2504_07xxx/2504.07951/images/318e688155ccd183d4e1428cd31f0225ef444eeaf637aece6b3e58c56e5d7812.jpg b/data/2025/2504_07xxx/2504.07951/images/318e688155ccd183d4e1428cd31f0225ef444eeaf637aece6b3e58c56e5d7812.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5511a47ba6e5dcd163c96e7537028056159585e6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/318e688155ccd183d4e1428cd31f0225ef444eeaf637aece6b3e58c56e5d7812.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58648264ead285999759e962b2db15ace371e13fe3379ce8d8f388db17717a87 +size 3888 diff --git a/data/2025/2504_07xxx/2504.07951/images/35b33667f30401245922f03ec33eabdee73856313eeb1047a34b45938190086e.jpg b/data/2025/2504_07xxx/2504.07951/images/35b33667f30401245922f03ec33eabdee73856313eeb1047a34b45938190086e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5485d2b404da28c07ab2465d305e3b34cee0eeb6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/35b33667f30401245922f03ec33eabdee73856313eeb1047a34b45938190086e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd18a4827977af4d1d1ad797a0e6d44cc60b277a8926a4e57064b952d2669ae3 +size 17078 diff --git a/data/2025/2504_07xxx/2504.07951/images/3608745d85340830d3714b391c6401e08ae575b7c2858fdf864af7cb9b124f87.jpg b/data/2025/2504_07xxx/2504.07951/images/3608745d85340830d3714b391c6401e08ae575b7c2858fdf864af7cb9b124f87.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5711faee10f5bfdd1ac4eedaf63c9316057d3323 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/3608745d85340830d3714b391c6401e08ae575b7c2858fdf864af7cb9b124f87.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a1a6c54759c005e5f718dda1515156ba10a0f12d49c5df18ed17133698a1a43 +size 44190 diff --git a/data/2025/2504_07xxx/2504.07951/images/38e56edac0d3be48f9d017b5be0ecba6837647563e6fba8f0f05f1b2c5885b56.jpg b/data/2025/2504_07xxx/2504.07951/images/38e56edac0d3be48f9d017b5be0ecba6837647563e6fba8f0f05f1b2c5885b56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2259e613b518ac30a9d3c09d22caa3c190faaa14 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/38e56edac0d3be48f9d017b5be0ecba6837647563e6fba8f0f05f1b2c5885b56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e214c8e6c9ab997910939ae09d3bd4c74879ec51b11566a2019bd901c9c483b7 +size 35141 diff --git a/data/2025/2504_07xxx/2504.07951/images/3ae00f36a4a66bc014814831c85b16c3b2c789a86ea8281e5e74facae8c1ca14.jpg b/data/2025/2504_07xxx/2504.07951/images/3ae00f36a4a66bc014814831c85b16c3b2c789a86ea8281e5e74facae8c1ca14.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7a14ac49f2c9ae38c9fd6ca684a77609417a6b7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/3ae00f36a4a66bc014814831c85b16c3b2c789a86ea8281e5e74facae8c1ca14.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51b83f693ab4d342cf4cdf1d70b11aa028bc303383da261b51bb972de9ca5901 +size 19775 diff --git a/data/2025/2504_07xxx/2504.07951/images/4133a9cbf5fb81e279a53a22c25cc8e1a31cad79eb8d7f96cf9c1d0cbaf75ebd.jpg b/data/2025/2504_07xxx/2504.07951/images/4133a9cbf5fb81e279a53a22c25cc8e1a31cad79eb8d7f96cf9c1d0cbaf75ebd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15201543f5eb7c832d6ce3e7b8e49ed85669a8d4 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07951/images/4133a9cbf5fb81e279a53a22c25cc8e1a31cad79eb8d7f96cf9c1d0cbaf75ebd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d7cb57b252f9b10c5e7ce6ee730b68dd2f98e3187b8878a7e88386bcfa2a8b9 +size 11670 diff --git a/data/2025/2504_07xxx/2504.07951/images/45331be5dffd8bf1906423e01879dee5d3a816ec501329e152690ebd1e24e59f.jpg b/data/2025/2504_07xxx/2504.07951/images/45331be5dffd8bf1906423e01879dee5d3a816ec501329e152690ebd1e24e59f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b6a5b8e8da68fa472d85194f7743f2745f7416a2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/45331be5dffd8bf1906423e01879dee5d3a816ec501329e152690ebd1e24e59f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5241c7dd1680ce9063461e7d99a59802d5b217ed53e3fa59d3def96d5329550 +size 7840 diff --git a/data/2025/2504_07xxx/2504.07951/images/48ddff1697fd13cb0e2777d4878ca97b8b1c84373b2fd14b4c89423c97881f5b.jpg b/data/2025/2504_07xxx/2504.07951/images/48ddff1697fd13cb0e2777d4878ca97b8b1c84373b2fd14b4c89423c97881f5b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e20cef38b10fd113266718934795fbbfa9e9ef0e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/48ddff1697fd13cb0e2777d4878ca97b8b1c84373b2fd14b4c89423c97881f5b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:caebbcafeb45b4780d0591eb52c4ee4adb1b6d262049bcec14ff3aaa2d022a63 +size 16426 diff --git a/data/2025/2504_07xxx/2504.07951/images/4940c0e3eb2f8c85622a677039121632355bd85038b5913c73fa6ec971513244.jpg b/data/2025/2504_07xxx/2504.07951/images/4940c0e3eb2f8c85622a677039121632355bd85038b5913c73fa6ec971513244.jpg new file mode 100644 index 0000000000000000000000000000000000000000..704243d8e3b3831c0a46bbaf98f11796286a22de --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/4940c0e3eb2f8c85622a677039121632355bd85038b5913c73fa6ec971513244.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c7a187892bca234898962bfab0289e7bfc7887121bf1ce9579f39751b9066e6a +size 16007 diff --git a/data/2025/2504_07xxx/2504.07951/images/4a1035b9a18a960ba7e2cd7aca5a49f2e04a75b6df481835ae8d4417804fcb2d.jpg b/data/2025/2504_07xxx/2504.07951/images/4a1035b9a18a960ba7e2cd7aca5a49f2e04a75b6df481835ae8d4417804fcb2d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b3f843b1dfb990fd1a103ef4deb1f66c86d0fe5c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/4a1035b9a18a960ba7e2cd7aca5a49f2e04a75b6df481835ae8d4417804fcb2d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52bd688305eec5a9104826d11933e4d853adc0122cb96d4f13a3090344047efa +size 28038 diff --git a/data/2025/2504_07xxx/2504.07951/images/4dde252ff0946067af23011ac2b4db6aa4011b6c34fbfbc6549057a599da712d.jpg b/data/2025/2504_07xxx/2504.07951/images/4dde252ff0946067af23011ac2b4db6aa4011b6c34fbfbc6549057a599da712d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b42e007c94a60493dd75ff1ddc879694fea1d0d0 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/4dde252ff0946067af23011ac2b4db6aa4011b6c34fbfbc6549057a599da712d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e15cc51c0d02e9fefb97cca8ef6d9c37fe1be4dc67c60e8aef9b551726a2533 +size 21677 diff --git a/data/2025/2504_07xxx/2504.07951/images/4e86f0374868e29a9f27723f7ab7e8a000db7028e3af86023ac4513fa225732c.jpg b/data/2025/2504_07xxx/2504.07951/images/4e86f0374868e29a9f27723f7ab7e8a000db7028e3af86023ac4513fa225732c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfc18623a06b6511b118e83b11f3c2519a370be5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/4e86f0374868e29a9f27723f7ab7e8a000db7028e3af86023ac4513fa225732c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6619689f4ef83741e82dc6b61eb3060430041efe5e0d13d315508405f6988b0b +size 22228 diff --git 
a/data/2025/2504_07xxx/2504.07951/images/58b8a90492785ea6eb48f59e69bdee2ab2ead0444b721b6d7b2d5a5ac625d566.jpg b/data/2025/2504_07xxx/2504.07951/images/58b8a90492785ea6eb48f59e69bdee2ab2ead0444b721b6d7b2d5a5ac625d566.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c353a75199e67d657986c8b143ca7f983c06ac00 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/58b8a90492785ea6eb48f59e69bdee2ab2ead0444b721b6d7b2d5a5ac625d566.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5a09e10ab45b6c1396ad9cbc4588e1163a242409f2fd450f222b3a50ed1be18 +size 19460 diff --git a/data/2025/2504_07xxx/2504.07951/images/5c5d809252a6557e3554685ef21f3f3bb5c397b4474746975fe77434e16c52b3.jpg b/data/2025/2504_07xxx/2504.07951/images/5c5d809252a6557e3554685ef21f3f3bb5c397b4474746975fe77434e16c52b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48ff996cfd69e7fed28ef067a20be8796197c48c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/5c5d809252a6557e3554685ef21f3f3bb5c397b4474746975fe77434e16c52b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c585017e193f5843e168afcdf5219383df7541021a5dca8afb1603b6250bb6fe +size 80225 diff --git a/data/2025/2504_07xxx/2504.07951/images/5d510b88dd42b8ddd6e4d6b000a38f3524b9b76ce933ceb6ce23cf3dbff52932.jpg b/data/2025/2504_07xxx/2504.07951/images/5d510b88dd42b8ddd6e4d6b000a38f3524b9b76ce933ceb6ce23cf3dbff52932.jpg new file mode 100644 index 0000000000000000000000000000000000000000..07721eec06f2912ad804ef22b6ee171dea1a1a73 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/5d510b88dd42b8ddd6e4d6b000a38f3524b9b76ce933ceb6ce23cf3dbff52932.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1964563c05b2828176dc6e3ccc2be70dd179418e328d02838365a0a836d2c9ae +size 15219 diff --git a/data/2025/2504_07xxx/2504.07951/images/5f8f59f429c09150c5fcb8b66558a02dcabcfe38634d5ce492da13a607485231.jpg 
b/data/2025/2504_07xxx/2504.07951/images/5f8f59f429c09150c5fcb8b66558a02dcabcfe38634d5ce492da13a607485231.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5d82e6b7790b8b82afe776b31aa9593cf5e97d6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/5f8f59f429c09150c5fcb8b66558a02dcabcfe38634d5ce492da13a607485231.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cbcea4e2d0d5b251132af1fd91ee5fc056d307b3e6c46a4b20f9353b3558c72 +size 9262 diff --git a/data/2025/2504_07xxx/2504.07951/images/62cf5e7b98b47d2792dc6b7fe326f249af87efb09ffa6f45b0a87b47d5481909.jpg b/data/2025/2504_07xxx/2504.07951/images/62cf5e7b98b47d2792dc6b7fe326f249af87efb09ffa6f45b0a87b47d5481909.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d047e6ca9897d6d0c581f45c73b0d1cf1595aaca --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/62cf5e7b98b47d2792dc6b7fe326f249af87efb09ffa6f45b0a87b47d5481909.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0424a3f313d7659a5530528e3d40ccb6023f13f6cce5354d9966de3ddd6b02c6 +size 12693 diff --git a/data/2025/2504_07xxx/2504.07951/images/675dbf1f4b9b9be1ca72b30b1a7af76972f55649829f8678333c939553f2b746.jpg b/data/2025/2504_07xxx/2504.07951/images/675dbf1f4b9b9be1ca72b30b1a7af76972f55649829f8678333c939553f2b746.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c0e12dcb342a8ab11eab20ada27aefca6b69dd1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/675dbf1f4b9b9be1ca72b30b1a7af76972f55649829f8678333c939553f2b746.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1923c073b5a435cd1ccb707120c9f4d4f8fd4fcc5da7d1d9fa5f7311a86d550e +size 15239 diff --git a/data/2025/2504_07xxx/2504.07951/images/6f61e1f3f53e3ddd36f12868abec144cf60c2677463e6e9ba5cb4b0e8e6d8985.jpg b/data/2025/2504_07xxx/2504.07951/images/6f61e1f3f53e3ddd36f12868abec144cf60c2677463e6e9ba5cb4b0e8e6d8985.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5727514cdffcd156e8fa85defe8b251d5f0ef822 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/6f61e1f3f53e3ddd36f12868abec144cf60c2677463e6e9ba5cb4b0e8e6d8985.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34d661d5a77e2383a44d093491116602b92130c0e442ee546ffff69c888eaba0 +size 16229 diff --git a/data/2025/2504_07xxx/2504.07951/images/7229c0205223fd2165a40f38f790f64f89c1c7601693a992b739a3d796f80bf7.jpg b/data/2025/2504_07xxx/2504.07951/images/7229c0205223fd2165a40f38f790f64f89c1c7601693a992b739a3d796f80bf7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6b9dfe0bef2ddc551f9a68f8fd0a20c75d4e8b1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/7229c0205223fd2165a40f38f790f64f89c1c7601693a992b739a3d796f80bf7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8abbf46a89c1cbf75d66a07e5a4eedc0e7aaf036c3742d6a98e13416b5d604cc +size 22076 diff --git a/data/2025/2504_07xxx/2504.07951/images/7445330c28293501507c7a1de3b802c291b7e7d4af6025b3396fca03b97b7816.jpg b/data/2025/2504_07xxx/2504.07951/images/7445330c28293501507c7a1de3b802c291b7e7d4af6025b3396fca03b97b7816.jpg new file mode 100644 index 0000000000000000000000000000000000000000..735f922e04a011c4718995ea5511c1755b72b81a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/7445330c28293501507c7a1de3b802c291b7e7d4af6025b3396fca03b97b7816.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4f14ff1a3a8d341c960a9f003760cc078d3594c59741697b99f725d88efee34 +size 18545 diff --git a/data/2025/2504_07xxx/2504.07951/images/768487d1cb2c067b0f6e62f53328b62a8037bbd733461c754d3ae55f1b154cd2.jpg b/data/2025/2504_07xxx/2504.07951/images/768487d1cb2c067b0f6e62f53328b62a8037bbd733461c754d3ae55f1b154cd2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2f6e86b2c98aac5e177a460c63707e7dc891d42 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07951/images/768487d1cb2c067b0f6e62f53328b62a8037bbd733461c754d3ae55f1b154cd2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:780bfa1d35ea5f06e00e16bfa00f1ab4422c9e69e8437783cba6023ea47b7ac1 +size 16546 diff --git a/data/2025/2504_07xxx/2504.07951/images/7f483ac7b50797f6577cf5a7c10d402453a3e7f5e35ae72880d054ddcaeeb21f.jpg b/data/2025/2504_07xxx/2504.07951/images/7f483ac7b50797f6577cf5a7c10d402453a3e7f5e35ae72880d054ddcaeeb21f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ca9525fe16974ff23cc51ef8870f93e277da0c9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/7f483ac7b50797f6577cf5a7c10d402453a3e7f5e35ae72880d054ddcaeeb21f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:361a31b3e7d12979ad2cada90471e552cfec22c38c7fd51d7db8a828f2f6c50f +size 3835 diff --git a/data/2025/2504_07xxx/2504.07951/images/7f7757b2858d9f840bbcacbd8c46970472f92633800743ce8acb9dab1d537e65.jpg b/data/2025/2504_07xxx/2504.07951/images/7f7757b2858d9f840bbcacbd8c46970472f92633800743ce8acb9dab1d537e65.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de868437b91fa105353c2d6ad3f4b07a2dafeef4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/7f7757b2858d9f840bbcacbd8c46970472f92633800743ce8acb9dab1d537e65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f54c82ebd44ba9e306dcb41358ec1ff627881692022e83b77b91f3129c327c8 +size 14041 diff --git a/data/2025/2504_07xxx/2504.07951/images/80f27c4c3ad4d2f64ee10c2d8bb2923fb6dd8bdd2496f979ee4083e210d57191.jpg b/data/2025/2504_07xxx/2504.07951/images/80f27c4c3ad4d2f64ee10c2d8bb2923fb6dd8bdd2496f979ee4083e210d57191.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94bbeff5893e5bf9ebc7803cb0918bb2e908c31b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/80f27c4c3ad4d2f64ee10c2d8bb2923fb6dd8bdd2496f979ee4083e210d57191.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2a68717c812c7ceba13183e1fef9dfe5ee362f68665f922bca7971c66211a1a1 +size 20667 diff --git a/data/2025/2504_07xxx/2504.07951/images/81e7cde88d0f03bd3d1762a769bf80c9cf66016126c5cafb8103a4885b3eefb2.jpg b/data/2025/2504_07xxx/2504.07951/images/81e7cde88d0f03bd3d1762a769bf80c9cf66016126c5cafb8103a4885b3eefb2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8bc7635ecf61a45303dfc94090260001e47b607b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/81e7cde88d0f03bd3d1762a769bf80c9cf66016126c5cafb8103a4885b3eefb2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3298c658cbcdc583156ee21ae5a8a6325e2cab99734f94ab73e2a6d265cd8686 +size 14281 diff --git a/data/2025/2504_07xxx/2504.07951/images/8357b1f7cf079f4433ace33ae357a8cce5c5bbe3741aa00985d510c614ec3825.jpg b/data/2025/2504_07xxx/2504.07951/images/8357b1f7cf079f4433ace33ae357a8cce5c5bbe3741aa00985d510c614ec3825.jpg new file mode 100644 index 0000000000000000000000000000000000000000..81169c4a1a0ed52f04f697cd2159ebe9798bf32f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/8357b1f7cf079f4433ace33ae357a8cce5c5bbe3741aa00985d510c614ec3825.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4734280b324d795cc0cfaf28cc25c9f53905e7960b52b89548f7bfeb330b7b3 +size 7883 diff --git a/data/2025/2504_07xxx/2504.07951/images/849e76211de227d7564ee66c874ace735709b77d3978a5168211969bf19288d8.jpg b/data/2025/2504_07xxx/2504.07951/images/849e76211de227d7564ee66c874ace735709b77d3978a5168211969bf19288d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df4cb5d8523cbda0314e63d1a71ca1b135215a27 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/849e76211de227d7564ee66c874ace735709b77d3978a5168211969bf19288d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ec1513973e3de6621ec297c72b4bf243ee06fb22e7b6191f8bc70fc4619c733 +size 41453 diff --git 
a/data/2025/2504_07xxx/2504.07951/images/869818728d380a70e41fbd45b2de162b41240502115758a76d23bbc20a513422.jpg b/data/2025/2504_07xxx/2504.07951/images/869818728d380a70e41fbd45b2de162b41240502115758a76d23bbc20a513422.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96e150081f1d57110b5b46e84e427cf2b0a85c7a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/869818728d380a70e41fbd45b2de162b41240502115758a76d23bbc20a513422.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39ee3972cf81611b345ddea44e9f894730f48816bfbfd641a125453d16c257bd +size 27694 diff --git a/data/2025/2504_07xxx/2504.07951/images/87949d3241155096a62e02bbef0b483bc4abd62f02b7c47214c9da40da229f94.jpg b/data/2025/2504_07xxx/2504.07951/images/87949d3241155096a62e02bbef0b483bc4abd62f02b7c47214c9da40da229f94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a644185b33f2beccb4efa6e10ebbda2e513823f1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/87949d3241155096a62e02bbef0b483bc4abd62f02b7c47214c9da40da229f94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3111d9f502f32f6545bec0fb033785f07249e0228fe8bc4ded3081d0ec658a9b +size 13948 diff --git a/data/2025/2504_07xxx/2504.07951/images/87a7df1c2a6cde40a771564ee46e3757cc1a708b33a759bba95feacc6350135a.jpg b/data/2025/2504_07xxx/2504.07951/images/87a7df1c2a6cde40a771564ee46e3757cc1a708b33a759bba95feacc6350135a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7cceed915253a4d9f1c6c5b5bab9f7cd142845f4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/87a7df1c2a6cde40a771564ee46e3757cc1a708b33a759bba95feacc6350135a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58ca9e7de4f602febbd255e9cc1a72277ee78d1d347954e9920ef6e21595b6af +size 21128 diff --git a/data/2025/2504_07xxx/2504.07951/images/8b99abece7cc6b38f661c87c0bc8d05b6d13d113129c2d0fb0fa3e7c2fcb5155.jpg 
b/data/2025/2504_07xxx/2504.07951/images/8b99abece7cc6b38f661c87c0bc8d05b6d13d113129c2d0fb0fa3e7c2fcb5155.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfa2edf6829b10eb2b61bfce789ffa8cf7c16a96 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/8b99abece7cc6b38f661c87c0bc8d05b6d13d113129c2d0fb0fa3e7c2fcb5155.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4941f3e1a0c6a50f2ad995f7fa7bd4292a407c468695399ea10ab9d0accaadce +size 16455 diff --git a/data/2025/2504_07xxx/2504.07951/images/8eeca22c13a880c372f86b1bd6d50e828536005a351d735514d7e9130695399e.jpg b/data/2025/2504_07xxx/2504.07951/images/8eeca22c13a880c372f86b1bd6d50e828536005a351d735514d7e9130695399e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2d9127a539023cccaa2f81eee2f009059e693477 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/8eeca22c13a880c372f86b1bd6d50e828536005a351d735514d7e9130695399e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:673fed6ac3863853efee5dc7f786200c9694c02f02a12ae547ca356364e9cb82 +size 8094 diff --git a/data/2025/2504_07xxx/2504.07951/images/925871664600fa369934dd20a2f34c9ad334d39d98206e080eb90db5737c852f.jpg b/data/2025/2504_07xxx/2504.07951/images/925871664600fa369934dd20a2f34c9ad334d39d98206e080eb90db5737c852f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c48ce4456b5658ea1b453e1804875e079d29b161 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/925871664600fa369934dd20a2f34c9ad334d39d98206e080eb90db5737c852f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14bc35f7fcbc194ae8e187b0c615b0da86e08b66a6e275f812aa5ee9b8c6384c +size 90383 diff --git a/data/2025/2504_07xxx/2504.07951/images/92ad024f678fc1ec2ce39f984eafbe6ca1eaeb8ab89672d5e361993c173ed68b.jpg b/data/2025/2504_07xxx/2504.07951/images/92ad024f678fc1ec2ce39f984eafbe6ca1eaeb8ab89672d5e361993c173ed68b.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..0a8bf44fc15b79918cab4a33fb59a3d851f0e6b1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/92ad024f678fc1ec2ce39f984eafbe6ca1eaeb8ab89672d5e361993c173ed68b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70454d8639404d1d50d82e2d15a12dab82f4395ff39b3299a17085a11c3d5c32 +size 21154 diff --git a/data/2025/2504_07xxx/2504.07951/images/946d07662bb75c3af1e430bcb2e71945e0fc9b93f3c8ea8c090478e36a9ee523.jpg b/data/2025/2504_07xxx/2504.07951/images/946d07662bb75c3af1e430bcb2e71945e0fc9b93f3c8ea8c090478e36a9ee523.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a2051d42a21bd6831561744dfc68a24e419129c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/946d07662bb75c3af1e430bcb2e71945e0fc9b93f3c8ea8c090478e36a9ee523.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad551bf031554501ae30a5b2895598d0b2d0d5739d1902ff3d3a8271ffa76515 +size 10290 diff --git a/data/2025/2504_07xxx/2504.07951/images/94e560b7e79f3313be19f00c8abc755758f179ac42e21d3c6cdac2d3d494893e.jpg b/data/2025/2504_07xxx/2504.07951/images/94e560b7e79f3313be19f00c8abc755758f179ac42e21d3c6cdac2d3d494893e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a857c0a79ac59b4814fe78dbcd74f6dd495d8b6e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/94e560b7e79f3313be19f00c8abc755758f179ac42e21d3c6cdac2d3d494893e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2d4339f9b259b9ba83325103017e5883dee4733106643a37ece415d27f527e6 +size 18188 diff --git a/data/2025/2504_07xxx/2504.07951/images/954ecdf74a27126739fe55ea72c130d722bf017ede0c1fb5950a4e172b17fdcd.jpg b/data/2025/2504_07xxx/2504.07951/images/954ecdf74a27126739fe55ea72c130d722bf017ede0c1fb5950a4e172b17fdcd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4fbf12ef6f29f6b634abff81f49dc623528ebbc7 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07951/images/954ecdf74a27126739fe55ea72c130d722bf017ede0c1fb5950a4e172b17fdcd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b0ccec89a98db394b78cdbac6bc8a6bc7fc6ad0be045fcdb4ed30d31c003bb8 +size 37118 diff --git a/data/2025/2504_07xxx/2504.07951/images/97f7d3f77f2b4f693354d3f8412b5275b071e1b828e08f12a883241d15b4b73f.jpg b/data/2025/2504_07xxx/2504.07951/images/97f7d3f77f2b4f693354d3f8412b5275b071e1b828e08f12a883241d15b4b73f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0004a18d5b32fb21f088c7f3e5d823b775108bd2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/97f7d3f77f2b4f693354d3f8412b5275b071e1b828e08f12a883241d15b4b73f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a682691a75371f03857f4d75bcd82e617b7a4fd0f5934cf457fe86df792b3fd +size 19148 diff --git a/data/2025/2504_07xxx/2504.07951/images/9be0f1f4378cc87d81924a109805bfd368af57358e8a69b7dd0cb84c5d2d293b.jpg b/data/2025/2504_07xxx/2504.07951/images/9be0f1f4378cc87d81924a109805bfd368af57358e8a69b7dd0cb84c5d2d293b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7f76388692ec99669a3e51854554bbbe1aa46a1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/9be0f1f4378cc87d81924a109805bfd368af57358e8a69b7dd0cb84c5d2d293b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4857718d4d53c94f6372b0003706ee2eb28347470772d9f9bc571d9c91413e5 +size 16370 diff --git a/data/2025/2504_07xxx/2504.07951/images/9dbd382a1fce940ad7eb236a4d73fcae11b11afb2586a2dd0f48ae744b045239.jpg b/data/2025/2504_07xxx/2504.07951/images/9dbd382a1fce940ad7eb236a4d73fcae11b11afb2586a2dd0f48ae744b045239.jpg new file mode 100644 index 0000000000000000000000000000000000000000..110de4521b860306001760dc968ba12dbb5dd22f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/9dbd382a1fce940ad7eb236a4d73fcae11b11afb2586a2dd0f48ae744b045239.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ba8fd2d53a10d6fcd507f6753574a12cc66fafb0703df4e44df1fcec4e335a89 +size 15781 diff --git a/data/2025/2504_07xxx/2504.07951/images/a40ce810c6407918e90c23eeda423059a15b50810c13f19580f13fa38ea33ab6.jpg b/data/2025/2504_07xxx/2504.07951/images/a40ce810c6407918e90c23eeda423059a15b50810c13f19580f13fa38ea33ab6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11595522805dfd44f4cefdb42d89ce76b65a3efa --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/a40ce810c6407918e90c23eeda423059a15b50810c13f19580f13fa38ea33ab6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc35603b77561bfa161ee29355471aa008bf84b4a368c72fc9d340812ece8559 +size 36449 diff --git a/data/2025/2504_07xxx/2504.07951/images/a5eae89bad3ce176fd8a9fbfe3fb8e612accba59e9227da594fe7f8363a3bd57.jpg b/data/2025/2504_07xxx/2504.07951/images/a5eae89bad3ce176fd8a9fbfe3fb8e612accba59e9227da594fe7f8363a3bd57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62100fbedda2b138065444adeba986d43d872aae --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/a5eae89bad3ce176fd8a9fbfe3fb8e612accba59e9227da594fe7f8363a3bd57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cd25899cfd6fea15b9247fb28ff22087cec64925a6b599e8a05ea5cfc6381c5 +size 20468 diff --git a/data/2025/2504_07xxx/2504.07951/images/aa4d2e50304f1b35bc419434fa34759b233eea950df0d8cf73344bc441f1cd30.jpg b/data/2025/2504_07xxx/2504.07951/images/aa4d2e50304f1b35bc419434fa34759b233eea950df0d8cf73344bc441f1cd30.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee1acc1581ec3d5c68df4de3a2de5b6a8440ecd8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/aa4d2e50304f1b35bc419434fa34759b233eea950df0d8cf73344bc441f1cd30.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c883e39a04cb98ce0f599cde82aaec36808a1a76b8b4b08cdd566f9a648eba1d +size 22015 diff --git 
a/data/2025/2504_07xxx/2504.07951/images/af0628a1ce2945b362e0e67e09294f4be2d6a8597dc24e17cd4ce6c346c9a960.jpg b/data/2025/2504_07xxx/2504.07951/images/af0628a1ce2945b362e0e67e09294f4be2d6a8597dc24e17cd4ce6c346c9a960.jpg new file mode 100644 index 0000000000000000000000000000000000000000..486c28599079c43987a51624e85ded126dfc0acc --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/af0628a1ce2945b362e0e67e09294f4be2d6a8597dc24e17cd4ce6c346c9a960.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de3730eacfafedbfa6b3206892737dab80a1ae23a1667cb64d602f83062df801 +size 19451 diff --git a/data/2025/2504_07xxx/2504.07951/images/af197f1feb50d444ba04fef9c57e806bc731f5d9db518edfb3c47371741f6600.jpg b/data/2025/2504_07xxx/2504.07951/images/af197f1feb50d444ba04fef9c57e806bc731f5d9db518edfb3c47371741f6600.jpg new file mode 100644 index 0000000000000000000000000000000000000000..208309573383e507dff442079b86965ead8347c8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/af197f1feb50d444ba04fef9c57e806bc731f5d9db518edfb3c47371741f6600.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fd4856d33f1566fb4b29f06642529cbb21a2bfc912cc5fe3c5e41bcbf0a5c95 +size 29369 diff --git a/data/2025/2504_07xxx/2504.07951/images/b0edcfb8489751a58c88dfeb30b60fd61492d6bfaf25d9ea5df642aa01489397.jpg b/data/2025/2504_07xxx/2504.07951/images/b0edcfb8489751a58c88dfeb30b60fd61492d6bfaf25d9ea5df642aa01489397.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9baa30acd12599dfcf2a639b89982c79a863cb7b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/b0edcfb8489751a58c88dfeb30b60fd61492d6bfaf25d9ea5df642aa01489397.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14c9d5529d800d474139922c26c019745627d30f17e52ac9aa60d58fdfe0eb14 +size 22676 diff --git a/data/2025/2504_07xxx/2504.07951/images/b7303f5247e7b6e588dfa05e005f69657763ba42c509db0cf3096a7e8f1b8e8d.jpg 
b/data/2025/2504_07xxx/2504.07951/images/b7303f5247e7b6e588dfa05e005f69657763ba42c509db0cf3096a7e8f1b8e8d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04afcda5bba709d50110b29a298e99617601a9a7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/b7303f5247e7b6e588dfa05e005f69657763ba42c509db0cf3096a7e8f1b8e8d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73592024d432a0a6d4d22bf572200612f746bc193bb96811e834e29c26a2a43e +size 23225 diff --git a/data/2025/2504_07xxx/2504.07951/images/bbdc82534f1740e7817c2ab64a135366e6afbf6047c72c08d9e836e9c3a7a7ee.jpg b/data/2025/2504_07xxx/2504.07951/images/bbdc82534f1740e7817c2ab64a135366e6afbf6047c72c08d9e836e9c3a7a7ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a035aa1ab0574ed5ff8e7fc2a817716ba9dcf19 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/bbdc82534f1740e7817c2ab64a135366e6afbf6047c72c08d9e836e9c3a7a7ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d74a6ad856b9ea55f99947f560130eee41160c33635552c93b35a9d97e4dc82f +size 42622 diff --git a/data/2025/2504_07xxx/2504.07951/images/bd11f503cbb83405e5763b94f0bf05d0647d5566a363956255066979df79ee25.jpg b/data/2025/2504_07xxx/2504.07951/images/bd11f503cbb83405e5763b94f0bf05d0647d5566a363956255066979df79ee25.jpg new file mode 100644 index 0000000000000000000000000000000000000000..deff6a7ac84bcd4f62122a2b474378c98ecc0f60 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/bd11f503cbb83405e5763b94f0bf05d0647d5566a363956255066979df79ee25.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a482137189e90f834c221e3f25023c78e06f4e000767a19d7e30002e9f4553c +size 24380 diff --git a/data/2025/2504_07xxx/2504.07951/images/bef8ca3878711b6a138e83a9bfee1c56c489545a02ac647bb89e7d1bbb3a12e6.jpg b/data/2025/2504_07xxx/2504.07951/images/bef8ca3878711b6a138e83a9bfee1c56c489545a02ac647bb89e7d1bbb3a12e6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a1843cf2bebd4c1f15a6c51e4a439ff8dd08250a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/bef8ca3878711b6a138e83a9bfee1c56c489545a02ac647bb89e7d1bbb3a12e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e446dc47aa845e4a42ef0564af388f0efe1570e4623c66518b6d193d7a1540be +size 18522 diff --git a/data/2025/2504_07xxx/2504.07951/images/c108eee9a2065d7228436e8d9b0fa0023a328984cbb26e12f2be985be92453a1.jpg b/data/2025/2504_07xxx/2504.07951/images/c108eee9a2065d7228436e8d9b0fa0023a328984cbb26e12f2be985be92453a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64b10e6940b030a576285b922348970524939857 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/c108eee9a2065d7228436e8d9b0fa0023a328984cbb26e12f2be985be92453a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12338d3a6fcd013405571372341e1763022383e97fdde0226e85d525c9759e89 +size 11876 diff --git a/data/2025/2504_07xxx/2504.07951/images/c3de713136a78894e21369ded443a32da1ce3d11b7bf70df27077caf95a41978.jpg b/data/2025/2504_07xxx/2504.07951/images/c3de713136a78894e21369ded443a32da1ce3d11b7bf70df27077caf95a41978.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ffddb2063b8a5c703ef04116df311bfde1d5aa2e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/c3de713136a78894e21369ded443a32da1ce3d11b7bf70df27077caf95a41978.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cecad2c92d67cd27a5563a4382d4c4e309a1399905970e684f4b10169a8f1825 +size 10635 diff --git a/data/2025/2504_07xxx/2504.07951/images/ca37c4a7a1a0179f3629d000ae32bcaef96674560f551745f099a33c45e3a31c.jpg b/data/2025/2504_07xxx/2504.07951/images/ca37c4a7a1a0179f3629d000ae32bcaef96674560f551745f099a33c45e3a31c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a53e59a020ab2eb28f6ef461edb2237f46cc3634 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07951/images/ca37c4a7a1a0179f3629d000ae32bcaef96674560f551745f099a33c45e3a31c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:565afa126d98d59e94991967bc9e1aaaf9d8e4ccad086527e18a69bb132f5dd0 +size 20712 diff --git a/data/2025/2504_07xxx/2504.07951/images/cb4ae73517140700ebf19aea43a4b90376fd6e9529aca7bd298171d3300e1774.jpg b/data/2025/2504_07xxx/2504.07951/images/cb4ae73517140700ebf19aea43a4b90376fd6e9529aca7bd298171d3300e1774.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b3c2413e8eb4122f9e184fccb48a027e8e7687d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/cb4ae73517140700ebf19aea43a4b90376fd6e9529aca7bd298171d3300e1774.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e526f29330af274ef523e81e110bf2b6304104fc0f3e9e09c7151208009ccf2e +size 20170 diff --git a/data/2025/2504_07xxx/2504.07951/images/d1f9ebe2963b7671652b537ba3995aa73ec10a81a9fce26f78cb63d714e35caf.jpg b/data/2025/2504_07xxx/2504.07951/images/d1f9ebe2963b7671652b537ba3995aa73ec10a81a9fce26f78cb63d714e35caf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0e89d7dd4bd58a0499e58bbc32adfb945711b9d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/d1f9ebe2963b7671652b537ba3995aa73ec10a81a9fce26f78cb63d714e35caf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e06776dfce305cca4aef06275dd43bb64b2a2cd9570d88584d62d51cf8493dc1 +size 20488 diff --git a/data/2025/2504_07xxx/2504.07951/images/d5047d4224dc4663381ccf110ebe91f262de136e0cc31df5c5930e78cd22c3cf.jpg b/data/2025/2504_07xxx/2504.07951/images/d5047d4224dc4663381ccf110ebe91f262de136e0cc31df5c5930e78cd22c3cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39a8e5e0471055b85a04f2fc804aa3dcf9187ac5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/d5047d4224dc4663381ccf110ebe91f262de136e0cc31df5c5930e78cd22c3cf.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8763d7558feaff4af91de0f45542afaadd178a6545a0c73e10a93215529855ba +size 36344 diff --git a/data/2025/2504_07xxx/2504.07951/images/dc53bc628df5feb99518e025ca084ba7b9428638cf809cc0d413eaca42641103.jpg b/data/2025/2504_07xxx/2504.07951/images/dc53bc628df5feb99518e025ca084ba7b9428638cf809cc0d413eaca42641103.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0062512779c4207b3e7c4c5cc54000b5f01f16c0 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/dc53bc628df5feb99518e025ca084ba7b9428638cf809cc0d413eaca42641103.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7b91cb85fc53bc04ed9ab4f67c76dcd6b1d6a281affc0c8f66ff3b100169f00 +size 16287 diff --git a/data/2025/2504_07xxx/2504.07951/images/de31258d30cda455adcab7e3e5a0b3e5d9d53d61358d4cde8f2e527f429a9010.jpg b/data/2025/2504_07xxx/2504.07951/images/de31258d30cda455adcab7e3e5a0b3e5d9d53d61358d4cde8f2e527f429a9010.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fad6cc7a2532038d3686b761c72620dae84329ce --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/de31258d30cda455adcab7e3e5a0b3e5d9d53d61358d4cde8f2e527f429a9010.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:411fbd5cfeac8c1b4311a7eafb2e5a153eae2e7d97b46af2914c40a0ea725bb0 +size 21757 diff --git a/data/2025/2504_07xxx/2504.07951/images/df21bd1e747a0e8383b76d796688d703c2e2f383b9eba46b54ccd05bbe718f73.jpg b/data/2025/2504_07xxx/2504.07951/images/df21bd1e747a0e8383b76d796688d703c2e2f383b9eba46b54ccd05bbe718f73.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57d34abe4d9f4719ba36ecdd59e1ad59a8a0b708 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/df21bd1e747a0e8383b76d796688d703c2e2f383b9eba46b54ccd05bbe718f73.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8c667a2ae42615fd0d43a9f3765311601a9ebe6679bfc27b04d50720ec9ab44 +size 17344 diff --git 
a/data/2025/2504_07xxx/2504.07951/images/e1045b0eee72b7e00af10c12fc407f2a2b374da404a7ef29ecfb2de71a4c8ab8.jpg b/data/2025/2504_07xxx/2504.07951/images/e1045b0eee72b7e00af10c12fc407f2a2b374da404a7ef29ecfb2de71a4c8ab8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..976c8161bef8e45afc2b38c29babb84ba0f27173 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/e1045b0eee72b7e00af10c12fc407f2a2b374da404a7ef29ecfb2de71a4c8ab8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06465226ad2043ef3f428c36661e546236426676db99e43baa0bdd63ec3069e1 +size 19038 diff --git a/data/2025/2504_07xxx/2504.07951/images/e2db9e4bc14538474e23bf3e8a07a771abc4e4bc5ddfb8611c4bfdfe5adf2479.jpg b/data/2025/2504_07xxx/2504.07951/images/e2db9e4bc14538474e23bf3e8a07a771abc4e4bc5ddfb8611c4bfdfe5adf2479.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ffdba91d9bb49434910b31d4bc456743a2653cd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/e2db9e4bc14538474e23bf3e8a07a771abc4e4bc5ddfb8611c4bfdfe5adf2479.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:059cb0a747747218cd3118b99530692d156e1e6eb9d3acec2b40c90a536e31a4 +size 14651 diff --git a/data/2025/2504_07xxx/2504.07951/images/ea002e02fed450556ae2ce5afb3686df9c86c34add67acb163233c6b6af4e322.jpg b/data/2025/2504_07xxx/2504.07951/images/ea002e02fed450556ae2ce5afb3686df9c86c34add67acb163233c6b6af4e322.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a45700edec4096cb64936e488d0168b6d242ddd6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/ea002e02fed450556ae2ce5afb3686df9c86c34add67acb163233c6b6af4e322.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2bb4dfc1f2c3a3678fbe9183884d2ac6748ccec128c4624319f39bb1d8beb18 +size 14570 diff --git a/data/2025/2504_07xxx/2504.07951/images/ea60f82ab55e1707f5708b66a23159e1bf846ea0bd5d17664d67a614b59b36da.jpg 
b/data/2025/2504_07xxx/2504.07951/images/ea60f82ab55e1707f5708b66a23159e1bf846ea0bd5d17664d67a614b59b36da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21d299658d0a40a3ab2cba540caa3bb88ca7611c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/ea60f82ab55e1707f5708b66a23159e1bf846ea0bd5d17664d67a614b59b36da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7dd9e95bd0383fd29c97dfb4a9766de6551049472011df3bcb81cd589d627f9 +size 19818 diff --git a/data/2025/2504_07xxx/2504.07951/images/eae4c7c1f75345ab401e7eb770895d809db3271eff8cce1157ab801bf3947c98.jpg b/data/2025/2504_07xxx/2504.07951/images/eae4c7c1f75345ab401e7eb770895d809db3271eff8cce1157ab801bf3947c98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e27795f97eb8ecbab09c9659675a7b21b2026b2c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/eae4c7c1f75345ab401e7eb770895d809db3271eff8cce1157ab801bf3947c98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b881d1150705602e056e454df183bd1ee61a7e71de0e081e4918d22a9b479493 +size 32530 diff --git a/data/2025/2504_07xxx/2504.07951/images/ecb582a4915e09f0f5d7f795e92882a1d6a26f88d15c6eb5d012aeb43340d06e.jpg b/data/2025/2504_07xxx/2504.07951/images/ecb582a4915e09f0f5d7f795e92882a1d6a26f88d15c6eb5d012aeb43340d06e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0146f5c11af492906e39ea997f04f5f358dfd28a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/ecb582a4915e09f0f5d7f795e92882a1d6a26f88d15c6eb5d012aeb43340d06e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2ccf06ebc23b23fb36d52f94bf5766ff970f001ad00aea3bb75243816f0426c +size 25846 diff --git a/data/2025/2504_07xxx/2504.07951/images/f1d1d1a9ae09d693813d534ed3e52995b2b53c52b57e1dea64039a244f0d113b.jpg b/data/2025/2504_07xxx/2504.07951/images/f1d1d1a9ae09d693813d534ed3e52995b2b53c52b57e1dea64039a244f0d113b.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..ca42d052359f83b2cf6bc68a098cf4d957d89b4c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/f1d1d1a9ae09d693813d534ed3e52995b2b53c52b57e1dea64039a244f0d113b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e01cc7e2b6003a8547ac3dcda30d10ec6c847bf9fc219d09cb33ebf12ff822e +size 20092 diff --git a/data/2025/2504_07xxx/2504.07951/images/f3ebdfb256272f381112555508c27704cd3e60fe5c08b203fb55b9fc64a1a634.jpg b/data/2025/2504_07xxx/2504.07951/images/f3ebdfb256272f381112555508c27704cd3e60fe5c08b203fb55b9fc64a1a634.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b3b09b6788a1dcfcc032fb0ee9e6d90e4462393 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/f3ebdfb256272f381112555508c27704cd3e60fe5c08b203fb55b9fc64a1a634.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee0be5364cf1fe55740fbcb7fc3dd35a3d2f4c2b7d47d26a7af083f5087d6b55 +size 10051 diff --git a/data/2025/2504_07xxx/2504.07951/images/f92c5996032cc044a4a3f0ed2777722f2dbfee317e184cb6745575c5c7f4f5ec.jpg b/data/2025/2504_07xxx/2504.07951/images/f92c5996032cc044a4a3f0ed2777722f2dbfee317e184cb6745575c5c7f4f5ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb313173ab5bdbc1a8af14665c5db8f74e30bfed --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/f92c5996032cc044a4a3f0ed2777722f2dbfee317e184cb6745575c5c7f4f5ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fdc55e89c3403b218f21bba68b7425f58045067073472351365404848539b85 +size 18456 diff --git a/data/2025/2504_07xxx/2504.07951/images/fb070c10fca1f4655ac4f3b29724b2851fad05ebde2ebfed26e2f7526c0576ed.jpg b/data/2025/2504_07xxx/2504.07951/images/fb070c10fca1f4655ac4f3b29724b2851fad05ebde2ebfed26e2f7526c0576ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87e5c067e896fad74c930449260a80dedba8da01 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07951/images/fb070c10fca1f4655ac4f3b29724b2851fad05ebde2ebfed26e2f7526c0576ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e36d9e2a0c15ef4938118d9c9c8a71d80756063b6ba8fd4d44f45c3b968ae17a +size 20797 diff --git a/data/2025/2504_07xxx/2504.07951/images/fdb45bc5fa88e9e4889729e2053cff1cee23f3c95a045183987d941d85b99456.jpg b/data/2025/2504_07xxx/2504.07951/images/fdb45bc5fa88e9e4889729e2053cff1cee23f3c95a045183987d941d85b99456.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d6ba959843ff8819dd7ecc5e1ec484ca4c1660b2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/images/fdb45bc5fa88e9e4889729e2053cff1cee23f3c95a045183987d941d85b99456.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2929a2672118bf15e10c4ee1c4703f3dc34decb9c877f50e2f8ecd1d10e22b3 +size 19602 diff --git a/data/2025/2504_07xxx/2504.07951/layout.json b/data/2025/2504_07xxx/2504.07951/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1c7c52cf7787c41feefab43fbbc051b89864ba48 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07951/layout.json @@ -0,0 +1,17325 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 168, + 103, + 442, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 103, + 442, + 121 + ], + "spans": [ + { + "bbox": [ + 168, + 103, + 442, + 121 + ], + "type": "text", + "content": "Scaling Laws for Native Multimodal Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 144, + 178, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 144, + 178, + 156 + ], + "spans": [ + { + "bbox": [ + 106, + 144, + 178, + 156 + ], + "type": "text", + "content": "Mustafa Shukor²" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 198, + 144, + 250, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 144, + 250, + 156 + ], + "spans": [ + { + "bbox": [ + 198, + 144, 
+ 250, + 156 + ], + "type": "text", + "content": "Enrico Fini" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 272, + 144, + 416, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 144, + 416, + 156 + ], + "spans": [ + { + "bbox": [ + 272, + 144, + 416, + 156 + ], + "type": "text", + "content": "Victor Guilherme Turrisi da Costa1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 438, + 144, + 503, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 144, + 503, + 156 + ], + "spans": [ + { + "bbox": [ + 438, + 144, + 503, + 156 + ], + "type": "text", + "content": "Matthieu Cord²" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 212, + 163, + 286, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 163, + 286, + 176 + ], + "spans": [ + { + "bbox": [ + 212, + 163, + 286, + 176 + ], + "type": "text", + "content": "Joshua Susskind" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 163, + 397, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 397, + 177 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 397, + 177 + ], + "type": "text", + "content": "Alaaeldin El-Nouby" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 225, + 182, + 261, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 182, + 261, + 197 + ], + "spans": [ + { + "bbox": [ + 225, + 182, + 261, + 197 + ], + "type": "text", + "content": "1Apple" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 283, + 182, + 384, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 182, + 384, + 197 + ], + "spans": [ + { + "bbox": [ + 283, + 182, + 384, + 197 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 283, + 182, + 384, + 197 + ], + "type": "text", + "content": "Sorbonne University" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 152, + 224, + 200, + 236 + ], + "type": "title", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 224, + 200, + 236 + ], + "spans": [ + { + "bbox": [ + 152, + 224, + 200, + 236 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 250, + 296, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 250, + 296, + 502 + ], + "spans": [ + { + "bbox": [ + 55, + 250, + 296, + 502 + ], + "type": "text", + "content": "Building general-purpose models that can effectively perceive the world through multimodal signals has been a long-standing goal. Current approaches involve integrating separately pre-trained components, such as connecting vision encoders to LLMs and continuing multimodal training. While such approaches exhibit remarkable sample efficiency, it remains an open question whether such late-fusion architectures are inherently superior. In this work, we revisit the architectural design of native multimodal models (NMMs)-those trained from the ground up on all modalities—and conduct an extensive scaling laws study, spanning 457 trained models with different architectures and training mixtures. Our investigation reveals no inherent advantage to late-fusion architectures over early-fusion ones, which do not rely on image encoders or tokenizers. On the contrary, early-fusion exhibits stronger performance at lower parameter counts, is more efficient to train, and is easier to deploy. Motivated by the strong performance of the early-fusion architectures, we show that incorporating Mixture of Experts (MoEs) allows models to learn modality-specific weights, significantly benefiting performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 525, + 135, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 525, + 135, + 537 + ], + "spans": [ + { + "bbox": [ + 56, + 525, + 135, + 537 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 544, + 295, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 295, + 641 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 295, + 641 + ], + "type": "text", + "content": "Multimodality provides a rich signal for perceiving and understanding the world. Advances in vision [23, 52, 55, 80] and language models [3, 19, 67] have enabled the development of powerful multimodal models that understand language, images, and audio. A common approach involves grafting separately pre-trained unimodal models, such as connecting a vision encoder to the input layer of an LLM [6, 9, 35, 43, 62, 64, 73, 78]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": "Although this seems like a convenient approach, it remains an open question whether such late-fusion strategies are inherently optimal for understanding multimodal signals. 
Moreover, with abundant multimodal data available, initializing from unimodal pre-training is potentially detrimental, as it may introduce biases that prevent the model" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 317, + 225, + 535, + 310 + ], + "blocks": [ + { + "bbox": [ + 317, + 225, + 535, + 310 + ], + "lines": [ + { + "bbox": [ + 317, + 225, + 535, + 310 + ], + "spans": [ + { + "bbox": [ + 317, + 225, + 535, + 310 + ], + "type": "image", + "image_path": "4e86f0374868e29a9f27723f7ab7e8a000db7028e3af86023ac4513fa225732c.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 317, + 323, + 535, + 410 + ], + "blocks": [ + { + "bbox": [ + 423, + 313, + 447, + 322 + ], + "lines": [ + { + "bbox": [ + 423, + 313, + 447, + 322 + ], + "spans": [ + { + "bbox": [ + 423, + 313, + 447, + 322 + ], + "type": "text", + "content": "FLOPs" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 317, + 323, + 535, + 410 + ], + "lines": [ + { + "bbox": [ + 317, + 323, + 535, + 410 + ], + "spans": [ + { + "bbox": [ + 317, + 323, + 535, + 410 + ], + "type": "image", + "image_path": "aa4d2e50304f1b35bc419434fa34759b233eea950df0d8cf73344bc441f1cd30.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 425, + 413, + 450, + 421 + ], + "lines": [ + { + "bbox": [ + 425, + 413, + 450, + 421 + ], + "spans": [ + { + "bbox": [ + 425, + 413, + 450, + 421 + ], + "type": "text", + "content": "FLOPs" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "lines": [ + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "spans": [ + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "type": "text", + "content": "Figure 1. Scaling properties of Native Multimodal Models. 
Based on the scaling laws study in § 3.1, we observe: (1) early and late fusion models provide similar validation loss " + }, + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "type": "text", + "content": " when trained with the same compute budget " + }, + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "type": "text", + "content": " (FLOPs); (2) This performance is achieved via a different trade-off between parameters " + }, + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "type": "text", + "content": " and number of training tokens " + }, + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 313, + 425, + 555, + 514 + ], + "type": "text", + "content": ", where early-fusion models require fewer parameters. (3) Sparse early-fusion models achieve lower loss and require more training tokens for a given FLOP budget." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 517, + 555, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 517, + 555, + 613 + ], + "spans": [ + { + "bbox": [ + 313, + 517, + 555, + 613 + ], + "type": "text", + "content": "from fully leveraging cross-modality co-dependancies. An additional challenge is scaling such systems; each component (e.g., vision encoder, LLM) has its own set of hyperparameters, pre-training data mixtures, and scaling properties with respect to the amount of data and compute applied. A more flexible architecture might allow the model to dynamically allocate its capacity across modalities, simplifying scaling efforts." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "content": "In this work, we focus on the scaling properties of native multimodal models trained from the ground up on multimodal data. We first investigate whether the commonly adopted late-fusion architectures hold an intrinsic advantage by comparing them to early-fusion models, which process raw multimodal inputs without relying on dedicated vision encoders. We conduct scaling experiments on early and late fusion architectures, deriving scaling laws to pre" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 221, + 36, + 568 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 221, + 36, + 568 + ], + "spans": [ + { + "bbox": [ + 14, + 221, + 36, + 568 + ], + "type": "text", + "content": "arXiv:2504.07951v4 [cs.CV] 9 Aug 2025" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 227 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 227 + ], + "type": "text", + "content": "dict their performance and compute-optimal configurations. Our findings indicate that late fusion offers no inherent advantage when trained from scratch. Instead, early-fusion models are more efficient and are easier to scale. 
Furthermore, we observe that native multimodal models follow scaling laws similar to those of LLMs [26], albeit with slight variations in scaling coefficients across modalities and datasets. Our results suggest that model parameters and training tokens should be scaled roughly equally for optimal performance. Moreover, we find that different multimodal training mixtures exhibit similar overall trends, indicating that our findings are likely to generalize to a broader range of settings." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 228, + 295, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 228, + 295, + 445 + ], + "spans": [ + { + "bbox": [ + 55, + 228, + 295, + 445 + ], + "type": "text", + "content": "While our findings favor early fusion, multimodal data is inherently heterogeneous, suggesting that some degree of parameter specialization may still offer benefits. To investigate this, we explore leveraging Mixture of Experts (MoEs) [59], a technique that enables the model to dynamically allocate specialized parameters across modalities in a symmetric and parallel manner, in contrast to late-fusion models, which are asymmetric and process data sequentially. Training native multimodal models with MoEs results in significantly improved performance and therefore, faster convergence. Our scaling laws for MoEs suggest that scaling number of training tokens is more important than the number of active parameters. This unbalanced scaling is different from what is observed for dense models, due to the higher number of total parameters for sparse models. In addition, Our analysis reveals that experts tend to specialize in different modalities, with this specialization being particularly prominent in the early and last layers." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 453, + 195, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 453, + 195, + 466 + ], + "spans": [ + { + "bbox": [ + 55, + 453, + 195, + 466 + ], + "type": "text", + "content": "1.1. Summary of our findings" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 472, + 234, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 472, + 234, + 483 + ], + "spans": [ + { + "bbox": [ + 55, + 472, + 234, + 483 + ], + "type": "text", + "content": "Our findings can be summarized as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 484, + 295, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 484, + 295, + 568 + ], + "spans": [ + { + "bbox": [ + 55, + 484, + 295, + 568 + ], + "type": "text", + "content": "Native Early and Late fusion perform on par: Early fusion models trained from scratch perform on par with their late-fusion counterparts, with a slight advantage to early-fusion models for low compute budgets (Figure 3). Furthermore, our scaling laws study indicates that the compute-optimal models for early and late fusion perform similarly as the compute budget increases (Figure 1 Top)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 568, + 295, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 568, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 55, + 568, + 295, + 616 + ], + "type": "text", + "content": "NMMs scale similarly to LLMs: The scaling laws of native multimodal models follow similar laws as text-only LLMs with slightly varying scaling exponents depending on the target data type and training mixture (Table 2)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 617, + 295, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 295, + 653 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 295, + 653 + ], + "type": "text", + "content": "Late-fusion requires more parameters: Compute-optimal late-fusion models require a higher parameters-to-data ratio when compared to early-fusion (Figure 1 bottom)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 654, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 295, + 714 + ], + "type": "text", + "content": "Sparsity significantly benefits early-fusion NMMs: Sparse NMMs exhibit significant improvements compared to their dense counterparts at the same inference cost (Figure 10). Furthermore, they implicitly learn modality-specific weights when trained with sparsity (Figure 12). In" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 318, + 71, + 553, + 168 + ], + "blocks": [ + { + "bbox": [ + 318, + 71, + 553, + 168 + ], + "lines": [ + { + "bbox": [ + 318, + 71, + 553, + 168 + ], + "spans": [ + { + "bbox": [ + 318, + 71, + 553, + 168 + ], + "type": "table", + "html": "
ExpressionDefinition
NNumber of parameters in the multimodal decoder. For MoEs this refers to the active parameters only.
DTotal number of multimodal tokens.
NvNumber of parameters in the vision-specific encoder. Only exists in late-fusion architectures.
DvNumber of vision-only tokens.
CTotal number of FLOPs, estimated as C = 6ND for early-fusion and C = 6(NvDv + ND) for late-fusion.
LValidation loss measured as the average over interleaved image-text, image-caption, and text-only data mixtures.
", + "image_path": "d5047d4224dc4663381ccf110ebe91f262de136e0cc31df5c5930e78cd22c3cf.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 170, + 553, + 180 + ], + "lines": [ + { + "bbox": [ + 313, + 170, + 553, + 180 + ], + "spans": [ + { + "bbox": [ + 313, + 170, + 553, + 180 + ], + "type": "text", + "content": "Table 1. Definitions of the expressions used throughout the paper." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 185, + 553, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 185, + 553, + 220 + ], + "spans": [ + { + "bbox": [ + 313, + 185, + 553, + 220 + ], + "type": "text", + "content": "addition, compute-optimal models rely more on scaling the number of training tokens than the number of active parameters as the compute-budget grows (Figure 1 Bottom)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 221, + 553, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 221, + 553, + 270 + ], + "spans": [ + { + "bbox": [ + 313, + 221, + 553, + 270 + ], + "type": "text", + "content": "Modality-agnostic routing beats Modality-aware routing for Sparse NMMs: Training sparse mixture of experts with modality-agnostic routing consistently outperforms models with modality-aware routing (Figure 11)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 276, + 399, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 276, + 399, + 289 + ], + "spans": [ + { + "bbox": [ + 313, + 276, + 399, + 289 + ], + "type": "text", + "content": "2. Preliminaries" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 296, + 388, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 296, + 388, + 308 + ], + "spans": [ + { + "bbox": [ + 313, + 296, + 388, + 308 + ], + "type": "text", + "content": "2.1. 
Definitions" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 314, + 553, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 314, + 553, + 386 + ], + "spans": [ + { + "bbox": [ + 313, + 314, + 553, + 386 + ], + "type": "text", + "content": "Native Multimodal Models (NMMs): Models that are trained from scratch on all modalities simultaneously without relying on pre-trained LLMs or vision encoders. Our focus is on the representative image and text modalities, where the model processes both text and images as input and generates text as output." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 386, + 554, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 386, + 554, + 469 + ], + "spans": [ + { + "bbox": [ + 313, + 386, + 554, + 469 + ], + "type": "text", + "content": "Early fusion: Enabling multimodal interaction from the beginning, using almost no modality-specific parameters (e.g., except a linear layer to patchify images). Using a single transformer model, this approach processes raw multimodal input—tokenized text and continuous image patches—with no image discretization. In this paper, we refer to the main transformer as the decoder." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 471, + 554, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 471, + 554, + 518 + ], + "spans": [ + { + "bbox": [ + 313, + 471, + 554, + 518 + ], + "type": "text", + "content": "Late fusion: Delaying the multimodal interaction to deeper layers, typically after separate unimodal components has processed that process each modality independently (e.g., a vision encoder connected to a decoder)." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 519, + 553, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 519, + 553, + 555 + ], + "spans": [ + { + "bbox": [ + 313, + 519, + 553, + 555 + ], + "type": "text", + "content": "Modality-agnostic routing: In sparse mixture-of-experts, modality-agnostic routing refers to relying on a learned router module that is trained jointly with the model." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 555, + 553, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 555, + 553, + 590 + ], + "spans": [ + { + "bbox": [ + 313, + 555, + 553, + 590 + ], + "type": "text", + "content": "Modality-aware routing: Routing based on pre-defined rules such as routing based on the modality type (e.g., vision-tokens, token-tokens)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 600, + 399, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 600, + 399, + 612 + ], + "spans": [ + { + "bbox": [ + 313, + 600, + 399, + 612 + ], + "type": "text", + "content": "2.2. Scaling Laws" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 617, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 554, + 713 + ], + "type": "text", + "content": "We aim to understand the scaling properties of NMMs and how different architectural choices influence trade-offs. To this end, we analyze our models within the scaling laws framework proposed by Hoffmann et al. [26], Kaplan et al. [31]. We compute FLOPs based on the total number of parameters, using the approximation " + }, + { + "bbox": [ + 313, + 617, + 554, + 713 + ], + "type": "inline_equation", + "content": "C = 6ND" + }, + { + "bbox": [ + 313, + 617, + 554, + 713 + ], + "type": "text", + "content": ", as adopted in prior work [2, 26]. 
However, we modify this estimation to suit our setup: for late-fusion models, FLOPs is computed" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 69, + 291, + 200 + ], + "blocks": [ + { + "bbox": [ + 58, + 69, + 291, + 200 + ], + "lines": [ + { + "bbox": [ + 58, + 69, + 291, + 200 + ], + "spans": [ + { + "bbox": [ + 58, + 69, + 291, + 200 + ], + "type": "image", + "image_path": "849e76211de227d7564ee66c874ace735709b77d3978a5168211969bf19288d8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 208, + 295, + 274 + ], + "lines": [ + { + "bbox": [ + 55, + 208, + 295, + 274 + ], + "spans": [ + { + "bbox": [ + 55, + 208, + 295, + 274 + ], + "type": "text", + "content": "Figure 2. Scaling laws for early-fusion and late-fusion native multimodal models. Each point represents a model (300M to 3B parameters) trained on varying number of tokens (250M to 400B). We report the average cross-entropy loss on the validation sets of interleaved (Obelics), Image-caption (HQITP), and text-only data (DCLM)." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "text", + "content": "as " + }, + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "inline_equation", + "content": "6(N_{v}D_{v} + ND)" + }, + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "text", + "content": ". We consider a setup where, given a compute budget " + }, + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "text", + "content": ", our goal is to predict the model's final performance, as well as determine the optimal number of parameters or number of training tokens. Consistent with prior studies on LLM scaling [26], we assume a power-law relationship between the final model loss and both model size " + }, + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "inline_equation", + "content": "(N)" + }, + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "text", + "content": " and training tokens " + }, + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "inline_equation", + "content": "(D)" + }, + { + "bbox": [ + 55, + 278, + 296, + 361 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 129, + 365, + 294, + 389 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 365, + 294, + 389 + ], + "spans": [ + { + "bbox": [ + 129, + 365, + 294, + 389 + ], + "type": "interline_equation", + "content": "L = E + \\frac {A}{N ^ {\\alpha}} + \\frac {B}{D ^ {\\beta}}. 
\\tag {1}", + "image_path": "7f483ac7b50797f6577cf5a7c10d402453a3e7f5e35ae72880d054ddcaeeb21f.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "spans": [ + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "content": " represents the lowest achievable loss on the dataset, while " + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "inline_equation", + "content": "\\frac{A}{N^{\\alpha}}" + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "content": " captures the effect of increasing the number of parameters, where a larger model leads to lower loss, with the rate of improvement governed by " + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "content": ". Similarly, " + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "inline_equation", + "content": "\\frac{B}{D^{\\beta}}" + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "content": " accounts for the benefits of a higher number of tokens, with " + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "content": " determining the rate of improvement. 
Additionally, we assume a linear relationship between compute budget (FLOPs) and both " + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "inline_equation", + "content": "C \\propto ND" + }, + { + "bbox": [ + 55, + 389, + 296, + 498 + ], + "type": "text", + "content": "). This further leads to power-law relationships detailed in Appendix C.7." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 504, + 170, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 504, + 170, + 517 + ], + "spans": [ + { + "bbox": [ + 55, + 504, + 170, + 517 + ], + "type": "text", + "content": "2.3. Experimental setup" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 522, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 522, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 522, + 296, + 713 + ], + "type": "text", + "content": "Our models are based on the autoregressive transformer architecture [71] with SwiGLU FFNs [58] and QK-Norm [17] following Li et al. [39]. In early-fusion models, image patches are linearly projected to match the text token dimension, while late-fusion follows the CLIP architecture [55]. We adopt causal attention for text tokens and bidirectional attention for image tokens, we found this to work better. Training is conducted on a mixture of public and private multimodal datasets, including DCLM [39], Obelics [34], DFN [21], COYO [11], and a private collection of High-Quality Image-Text Pairs (HQITP). 
Images are resized to " + }, + { + "bbox": [ + 55, + 522, + 296, + 713 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 55, + 522, + 296, + 713 + ], + "type": "text", + "content": " resolution with a " + }, + { + "bbox": [ + 55, + 522, + 296, + 713 + ], + "type": "inline_equation", + "content": "14 \\times 14" + }, + { + "bbox": [ + 55, + 522, + 296, + 713 + ], + "type": "text", + "content": " patch size. We use a context length of 1k for the multimodal sequences. For training efficiency, we train our models with bfloat16, Fully Sharded Data Parallel (FSDP) [82], activation checkpointing, and gradient accumulation. We also use se" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 316, + 70, + 553, + 192 + ], + "blocks": [ + { + "bbox": [ + 316, + 70, + 553, + 192 + ], + "lines": [ + { + "bbox": [ + 316, + 70, + 553, + 192 + ], + "spans": [ + { + "bbox": [ + 316, + 70, + 553, + 192 + ], + "type": "table", + "html": "
L = E + A/Nα + B/DβN ∝ CaD ∝CbL ∝CcD ∝Nd
ModelDataEαβabcd
GPT3 [10]Text------0.048
Chinchilla [26]Text1.6930.3390.2850.460.54-
NMM (early-fusion)Text2.2220.30840.33750.52460.4774-0.04200.9085
Image-Caption1.5690.31110.33860.52030.4785-0.06100.9187
Interleaved1.9660.29710.3380.53150.4680-0.04590.8791
AVG1.9040.3010.3350.52620.473-0.04920.8987
NMM (late-fusion)AVG1.8910.29030.33830.63580.4619-0.04940.6732
Sparse NMM (early-fusion)AVG2.1580.7100.3720.3610.656-0.0471.797
", + "image_path": "bbdc82534f1740e7817c2ab64a135366e6afbf6047c72c08d9e836e9c3a7a7ee.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 194, + 553, + 237 + ], + "lines": [ + { + "bbox": [ + 313, + 194, + 553, + 237 + ], + "spans": [ + { + "bbox": [ + 313, + 194, + 553, + 237 + ], + "type": "text", + "content": "Table 2. Scaling laws for native multimodal models. We report the scaling laws results for early and late fusion models. We fit the scaling laws for different target data types as well as their average loss (AVG)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 242, + 553, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 242, + 553, + 315 + ], + "spans": [ + { + "bbox": [ + 313, + 242, + 553, + 315 + ], + "type": "text", + "content": "quence packing for the image captioning dataset to reduce the amount of padded tokens. Similar to previous works [2, 5, 26], we evaluate performance on held-out subsets of interleaved (Obelics), Image-caption (HQITP), and text-only data (DCLM). Further implementation details are provided in Appendix A." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 327, + 502, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 327, + 502, + 341 + ], + "spans": [ + { + "bbox": [ + 313, + 327, + 502, + 341 + ], + "type": "text", + "content": "3. 
Scaling native multimodal models" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 348, + 554, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 348, + 554, + 420 + ], + "spans": [ + { + "bbox": [ + 313, + 348, + 554, + 420 + ], + "type": "text", + "content": "In this section, we present a scaling laws study of native multimodal models, examining various architectural choices § 3.1, exploring different data mixtures § 3.2, analyzing the practical trade-offs between late and early fusion NMMs, and comparing the performance of native pretraining and continual pre-training of NMMs § 3.3." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 421, + 555, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 421, + 555, + 566 + ], + "spans": [ + { + "bbox": [ + 313, + 421, + 555, + 566 + ], + "type": "text", + "content": "Setup. We train models ranging from 0.3B to 4B active parameters, scaling the width while keeping the depth constant. For smaller training token budgets, we reduce the warm-up phase to 1K steps while maintaining 5K steps for larger budgets. Following Hagele et al. [25], models are trained with a constant learning rate, followed by a cooldown phase using an inverse square root scheduler. The cool-down phase spans " + }, + { + "bbox": [ + 313, + 421, + 555, + 566 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 313, + 421, + 555, + 566 + ], + "type": "text", + "content": " of the total steps spent at the constant learning rate. To estimate the scaling coefficients in Eq 1, we apply the L-BFGS algorithm [51] and Huber loss [28] (with " + }, + { + "bbox": [ + 313, + 421, + 555, + 566 + ], + "type": "inline_equation", + "content": "\\delta = 10^{-3}" + }, + { + "bbox": [ + 313, + 421, + 555, + 566 + ], + "type": "text", + "content": "), performing a grid search over initialization ranges." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 575, + 441, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 575, + 441, + 588 + ], + "spans": [ + { + "bbox": [ + 313, + 575, + 441, + 588 + ], + "type": "text", + "content": "3.1. Scaling laws of NMMs" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": "Scaling laws for early-fusion and late-fusion models. Figure 2 (left) presents the final loss averaged across interleaved, image-caption, and text datasets for early-fusion NMMs. The lowest-loss frontier follows a power law as a function of FLOPs. Fitting the power law yields the expression " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "L \\propto C^{-0.049}" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": ", indicating the rate of improvement with increasing compute. When analyzing the scaling laws per data type (e.g., image-caption, interleaved, text), we observe that the exponent varies (Table 2). 
For instance, the model achieves a higher rate of improvement for image-" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 71, + 214, + 192 + ], + "blocks": [ + { + "bbox": [ + 58, + 71, + 214, + 192 + ], + "lines": [ + { + "bbox": [ + 58, + 71, + 214, + 192 + ], + "spans": [ + { + "bbox": [ + 58, + 71, + 214, + 192 + ], + "type": "image", + "image_path": "dc53bc628df5feb99518e025ca084ba7b9428638cf809cc0d413eaca42641103.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 225, + 71, + 373, + 194 + ], + "blocks": [ + { + "bbox": [ + 225, + 71, + 373, + 194 + ], + "lines": [ + { + "bbox": [ + 225, + 71, + 373, + 194 + ], + "spans": [ + { + "bbox": [ + 225, + 71, + 373, + 194 + ], + "type": "image", + "image_path": "87949d3241155096a62e02bbef0b483bc4abd62f02b7c47214c9da40da229f94.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 389, + 71, + 538, + 194 + ], + "blocks": [ + { + "bbox": [ + 389, + 71, + 538, + 194 + ], + "lines": [ + { + "bbox": [ + 389, + 71, + 538, + 194 + ], + "spans": [ + { + "bbox": [ + 389, + 71, + 538, + 194 + ], + "type": "image", + "image_path": "ea002e02fed450556ae2ce5afb3686df9c86c34add67acb163233c6b6af4e322.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 86, + 198, + 523, + 214 + ], + "blocks": [ + { + "bbox": [ + 86, + 198, + 523, + 214 + ], + "lines": [ + { + "bbox": [ + 
86, + 198, + 523, + 214 + ], + "spans": [ + { + "bbox": [ + 86, + 198, + 523, + 214 + ], + "type": "image", + "image_path": "4133a9cbf5fb81e279a53a22c25cc8e1a31cad79eb8d7f96cf9c1d0cbaf75ebd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 223, + 555, + 257 + ], + "lines": [ + { + "bbox": [ + 54, + 223, + 555, + 257 + ], + "spans": [ + { + "bbox": [ + 54, + 223, + 555, + 257 + ], + "type": "text", + "content": "Figure 3. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the number of model parameters and the number of training tokens. Overall, early fusion shows a slight advantage, especially at smaller model sizes, and the gap decreases when scaling the number of parameters " + }, + { + "bbox": [ + 54, + 223, + 555, + 257 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 54, + 223, + 555, + 257 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 270, + 295, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 270, + 295, + 295 + ], + "spans": [ + { + "bbox": [ + 54, + 270, + 295, + 295 + ], + "type": "text", + "content": "caption data " + }, + { + "bbox": [ + 54, + 270, + 295, + 295 + ], + "type": "inline_equation", + "content": "(L\\propto C^{-0.061})" + }, + { + "bbox": [ + 54, + 270, + 295, + 295 + ], + "type": "text", + "content": " when compared to interleaved documents " + }, + { + "bbox": [ + 54, + 270, + 295, + 295 + ], + "type": "inline_equation", + "content": "(L\\propto C^{-0.046})" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "spans": [ + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": "To model the loss as a function of the number of 
training tokens " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": " and model parameters " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": ", we fit the parametric function in Eq 1, obtaining scaling exponents " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "\\alpha = 0.301" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "\\beta = 0.335" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": ". These describe the rates of improvement when scaling the number of model parameters and training tokens, respectively. Assuming a linear relationship between compute, " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "C \\propto ND" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": "), we derive the law relating model parameters to the compute budget (see Appendix C for details). 
Specifically, for a given compute budget " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": ", we compute the corresponding model size " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": " at logarithmically spaced " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": " values and determine " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "N_{opt}" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": ", the parameter count that minimizes loss. Repeating this across different FLOPs values produces a dataset of " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "(C, N_{opt})" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": ", to which we fit a power law predicting the compute-optimal model size as a function of compute: " + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "inline_equation", + "content": "N^{*} \\propto C^{0.526}" + }, + { + "bbox": [ + 55, + 300, + 296, + 479 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 486, + 296, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 486, + 296, + 520 + ], + "spans": [ + { + "bbox": [ + 55, + 486, + 296, + 520 + ], + "type": "text", + "content": "Similarly, we fit power laws to estimate the compute-optimal training dataset size as a function of compute and model size:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 521, + 247, + 537 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 521, + 247, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 247, + 537 + ], + "type": "interline_equation", + "content": "D _ {o p t} \\propto C ^ {0. 4 7 3}, D _ {o p t} \\propto N ^ {0. 8 9 9}.", + "image_path": "318e688155ccd183d4e1428cd31f0225ef444eeaf637aece6b3e58c56e5d7812.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 540, + 296, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 540, + 296, + 613 + ], + "spans": [ + { + "bbox": [ + 55, + 540, + 296, + 613 + ], + "type": "text", + "content": "These relationships allow practitioners to determine the optimal model and dataset size given a fixed compute budget. When analyzing by data type, we find that interleaved data benefits more from larger models (" + }, + { + "bbox": [ + 55, + 540, + 296, + 613 + ], + "type": "inline_equation", + "content": "a = 0.532" + }, + { + "bbox": [ + 55, + 540, + 296, + 613 + ], + "type": "text", + "content": ") compared to image_caption data (" + }, + { + "bbox": [ + 55, + 540, + 296, + 613 + ], + "type": "inline_equation", + "content": "a = 0.520" + }, + { + "bbox": [ + 55, + 540, + 296, + 613 + ], + "type": "text", + "content": "), whereas the opposite trend holds for training tokens." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "text", + "content": "We conduct a similar study on late-fusion models in Figure 2 (right) and observe comparable scaling behaviors. In particular, the loss scaling exponent " + }, + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "inline_equation", + "content": "(c = -0.0494)" + }, + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "text", + "content": " is nearly identical to that of early fusion " + }, + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "inline_equation", + "content": "(c = -0.0492)" + }, + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "text", + "content": ". This trend is evident in Figure 3, where early fusion outperforms late fusion at smaller model scales, while both architectures converge to similar performance at larger model sizes. We also observe similar trends when varying late-fusion con" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 317, + 271, + 433, + 398 + ], + "blocks": [ + { + "bbox": [ + 317, + 271, + 433, + 398 + ], + "lines": [ + { + "bbox": [ + 317, + 271, + 433, + 398 + ], + "spans": [ + { + "bbox": [ + 317, + 271, + 433, + 398 + ], + "type": "image", + "image_path": "e2db9e4bc14538474e23bf3e8a07a771abc4e4bc5ddfb8611c4bfdfe5adf2479.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 409, + 555, + 443 + ], + "lines": [ + { + "bbox": [ + 313, + 409, + 555, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 409, + 555, + 443 + ], + "type": "text", + "content": "Figure 4. Early vs late: pretraining efficiency. Early-fusion is faster to train and consumes less memory. Models are trained on 16 H100 GPUs for 160k steps (300B tokens)." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 436, + 272, + 553, + 398 + ], + "blocks": [ + { + "bbox": [ + 436, + 272, + 553, + 398 + ], + "lines": [ + { + "bbox": [ + 436, + 272, + 553, + 398 + ], + "spans": [ + { + "bbox": [ + 436, + 272, + 553, + 398 + ], + "type": "image", + "image_path": "7f7757b2858d9f840bbcacbd8c46970472f92633800743ce8acb9dab1d537e65.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 447, + 555, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 447, + 555, + 472 + ], + "spans": [ + { + "bbox": [ + 313, + 447, + 555, + 472 + ], + "type": "text", + "content": "figurations, such as using a smaller vision encoder with a larger text decoder Appendix B." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": "Scaling laws of NMMs vs LLMs. Upon comparing the scaling law coefficients of our NMMs to those reported for text-only LLMs (e.g., GPT-3, Chinchilla), we find them to be within similar ranges. In particular, for predicting the loss as a function of compute, GPT-3 [10] follows " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "L \\propto C^{-0.048}" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": ", while our models follow " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "L \\propto C^{-0.049}" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": ", suggesting that the performance of NMMs adheres to similar scaling laws as LLMs. 
Similarly, our estimates of the " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " parameters in Eq 1 (" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\alpha = 0.301" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\beta = 0.335" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": ") closely match those reported by Hoffmann et al. [26] (" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\alpha = 0.339" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\beta = 0.285" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": "). 
Likewise, our computed values of " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "a = 0.526" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "b = 0.473" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " align closely with " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "a = 0.46" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "b = 0.54" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " from [26], reinforcing the idea that, for native multimodal models, the number of training tokens and model parameters should be scaled proportionally. However, since the gap between " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " is smaller than in LLMs, this principle holds even more strongly for NMMs. 
Additionally, as " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "a = 0.526" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " is greater than " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "b = 0.473" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " in our case, the optimal model size for NMMs is larger than that of LLMs," + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 72, + 549, + 210 + ], + "blocks": [ + { + "bbox": [ + 63, + 72, + 549, + 210 + ], + "lines": [ + { + "bbox": [ + 63, + 72, + 549, + 210 + ], + "spans": [ + { + "bbox": [ + 63, + 72, + 549, + 210 + ], + "type": "image", + "image_path": "2c3c0faf8744d893068c96328767e8b3cb6814d50ea95a957ae80e790bdaa99e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 214, + 555, + 237 + ], + "lines": [ + { + "bbox": [ + 55, + 214, + 555, + 237 + ], + "spans": [ + { + "bbox": [ + 55, + 214, + 555, + 237 + ], + "type": "text", + "content": "Figure 5. Scaling laws with different training mixtures. Early-fusion models follow similar scaling trends when changing the pretraining mixtures. However, increasing the image captions leads to a higher scaling exponent norm (see Table 3)." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 57, + 249, + 298, + 304 + ], + "blocks": [ + { + "bbox": [ + 57, + 249, + 298, + 304 + ], + "lines": [ + { + "bbox": [ + 57, + 249, + 298, + 304 + ], + "spans": [ + { + "bbox": [ + 57, + 249, + 298, + 304 + ], + "type": "table", + "html": "
C-I-T (%)I/T ratioEαβabdc
145-45-101.191.9060.3010.3350.5270.4740.901-0.0492
240-20-400.651.9650.3280.3480.5180.4860.937-0.0486
330-30-400.591.8470.2530.3380.5720.4280.748-0.0463
420-40-400.491.8360.2590.3540.5820.4230.726-0.0488
", + "image_path": "ecb582a4915e09f0f5d7f795e92882a1d6a26f88d15c6eb5d012aeb43340d06e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 304, + 296, + 327 + ], + "lines": [ + { + "bbox": [ + 55, + 304, + 296, + 327 + ], + "spans": [ + { + "bbox": [ + 55, + 304, + 296, + 327 + ], + "type": "text", + "content": "Table 3. Scaling laws for different training mixtures. Early-fusion models. C-I-T refer to image-caption, interleaved and text" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 331, + 296, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 331, + 296, + 357 + ], + "spans": [ + { + "bbox": [ + 55, + 331, + 296, + 357 + ], + "type": "text", + "content": "while the optimal number of training tokens is lower, given a fixed compute budget." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "spans": [ + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "text", + "content": "Compute-optimal trade-offs for early vs. late fusion NMMs. While late- and early-fusion models reduce loss at similar rates with increasing FLOPs, we observe distinct trade-offs in their compute-optimal models. Specifically, " + }, + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "inline_equation", + "content": "N_{opt}" + }, + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "text", + "content": " is larger for late-fusion models, whereas " + }, + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "inline_equation", + "content": "D_{opt}" + }, + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "text", + "content": " is larger for early-fusion models. 
This indicates that, given a fixed compute budget, late-fusion models require a higher number of parameters, while early-fusion models benefit more from a higher number of training tokens. This trend is also reflected in the lower " + }, + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "inline_equation", + "content": "\\frac{N_{opt}}{D_{opt}} \\propto C^{0.053}" + }, + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "text", + "content": " for early fusion compared to " + }, + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "inline_equation", + "content": "\\frac{N_{opt}}{D_{opt}} \\propto C^{0.076}" + }, + { + "bbox": [ + 54, + 375, + 296, + 562 + ], + "type": "text", + "content": " for late fusion. As shown in Figure 1 (bottom), when scaling FLOPs, the number of parameters of early fusion models becomes significantly lower, which is crucial for reducing inference costs and, consequently, lowering serving costs after deployment." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 582, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 582, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 54, + 582, + 296, + 715 + ], + "type": "text", + "content": "Early-fusion is more efficient to train. We compare the training efficiency of late- and early-fusion architectures. As shown in Figure 4, early-fusion models consume less memory and train faster under the same compute budget. This advantage becomes even more pronounced as compute increases, highlighting the superior training efficiency of early fusion while maintaining comparable performance to late fusion at scale. Notably, for the same FLOPs, late-fusion models have a higher parameter count and higher effective depth (i.e., additional vision encoder layers alongside decoder layers) compared to early-fusion models." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 317, + 251, + 434, + 372 + ], + "blocks": [ + { + "bbox": [ + 317, + 251, + 434, + 372 + ], + "lines": [ + { + "bbox": [ + 317, + 251, + 434, + 372 + ], + "spans": [ + { + "bbox": [ + 317, + 251, + 434, + 372 + ], + "type": "image", + "image_path": "20c4e076762da2e42468513928ebe646d8ecfb3fe53aae03d0f904e8b15de96b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 380, + 555, + 425 + ], + "lines": [ + { + "bbox": [ + 313, + 380, + 555, + 425 + ], + "spans": [ + { + "bbox": [ + 313, + 380, + 555, + 425 + ], + "type": "text", + "content": "Figure 7. Early vs late fusion: changing the training mixture. We vary the training mixtures and plot the final training loss. Early fusion models attain a favorable performance when increasing the proportion of interleaved documents and text-only data." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 437, + 251, + 539, + 372 + ], + "blocks": [ + { + "bbox": [ + 437, + 251, + 539, + 372 + ], + "lines": [ + { + "bbox": [ + 437, + 251, + 539, + 372 + ], + "spans": [ + { + "bbox": [ + 437, + 251, + 539, + 372 + ], + "type": "image", + "image_path": "30f0b77b47bad4996304ae3eb682ca659d3838eb5fdb97502507841b6fa9a450.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 429, + 522, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 429, + 522, + 441 + ], + "spans": [ + { + "bbox": [ + 313, + 429, + 522, + 441 + ], + "type": "text", + "content": "3.2. 
Scaling laws for different data mixtures" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 449, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 556, + 715 + ], + "type": "text", + "content": "We investigate how variations in the training mixture affect the scaling laws of native multimodal models. To this end, we study four different mixtures that reflect common community practices [34, 41, 46, 81], with Image Caption-Interleaved-Text ratios of 45-45-10 (our default setup), 30-30-40, 40-20-40, and 20-40-40. For each mixture, we conduct a separate scaling study by training 76 different models, following our setup in § 3.1. Overall, Figure 5 shows that different mixtures follow similar scaling trends; however, the scaling coefficients vary depending on the mixture (Table 3). Interestingly, increasing the proportion of image-caption data (mixtures 1 and 2) leads to lower " + }, + { + "bbox": [ + 313, + 449, + 556, + 715 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 313, + 449, + 556, + 715 + ], + "type": "text", + "content": " and higher " + }, + { + "bbox": [ + 313, + 449, + 556, + 715 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 313, + 449, + 556, + 715 + ], + "type": "text", + "content": ", whereas increasing the ratio of interleaved and text data (mixtures 3 and 4) have the opposite effect. Notably, image-caption data contains more image tokens than text tokens; therefore, increasing its proportion results in more image tokens, while increasing interleaved and text data increases text token counts. This suggests that, when image tokens are prevalent, training for longer decreases the loss faster than increasing the model size. We also found that for a fixed model size, increasing text-only and interleaved data ratio is in favor of early-fusion Figure 7." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 70, + 303, + 199 + ], + "blocks": [ + { + "bbox": [ + 59, + 70, + 303, + 199 + ], + "lines": [ + { + "bbox": [ + 59, + 70, + 303, + 199 + ], + "spans": [ + { + "bbox": [ + 59, + 70, + 303, + 199 + ], + "type": "image", + "image_path": "38e56edac0d3be48f9d017b5be0ecba6837647563e6fba8f0f05f1b2c5885b56.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 203, + 295, + 237 + ], + "lines": [ + { + "bbox": [ + 55, + 203, + 295, + 237 + ], + "spans": [ + { + "bbox": [ + 55, + 203, + 295, + 237 + ], + "type": "text", + "content": "Figure 8. Early native vs initializing from LLMs: initializing from pre-trained models and scaling training tokens. We compare training with and without initializing from DCLM-1B." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 240, + 296, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 240, + 296, + 266 + ], + "spans": [ + { + "bbox": [ + 55, + 240, + 296, + 266 + ], + "type": "text", + "content": "3.3. Native multimodal pre-training vs. 
continual training of LLMs" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 270, + 296, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 270, + 296, + 426 + ], + "spans": [ + { + "bbox": [ + 54, + 270, + 296, + 426 + ], + "type": "text", + "content": "In this section, we compare training natively from scratch to continual training after initializing from a pre-trained LLM. We initialize the model from DCLM-1B [21] that is trained on more than 2T tokens. Figure 8 shows that native multimodal models can close the gap with initialized models when trained for longer. Specifically, on image captioning data, the model requires fewer than 100B multimodal tokens to reach comparable performance. However, on interleaved and text data, the model may need longer training—up to 1T tokens. Considering the cost of pre-training, these results suggest that training natively could be a more efficient approach for achieving the same performance on multimodal benchmarks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 441, + 250, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 441, + 250, + 454 + ], + "spans": [ + { + "bbox": [ + 55, + 441, + 250, + 454 + ], + "type": "text", + "content": "4. Towards multimodal specialization" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 461, + 295, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 461, + 295, + 605 + ], + "spans": [ + { + "bbox": [ + 55, + 461, + 295, + 605 + ], + "type": "text", + "content": "Previously, we demonstrated that early-fusion models achieve performance on par with late-fusion models under a fixed compute budget. However, multimodal data is inherently heterogeneous, and training a unified model to fit such diverse distributions may be suboptimal. Here, we argue for multimodal specialization within a unified architecture. 
Ideally, the model should implicitly adapt to each modality, for instance, by learning modality-specific weights or specialized experts. Mixture of Experts is a strong candidate for this approach, having demonstrated effectiveness in LLMs. In this section, we highlight the advantages of sparse early-fusion models over their dense counterparts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "text", + "content": "Setup. Our sparse models are based on the dropless-MoE implementation of Gale et al. [24], which eliminates token dropping during training caused by expert capacity constraints. We employ a top-" + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "text", + "content": " expert-choice routing mechanism, where each token selects its top-" + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "text", + "content": " experts among the " + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "text", + "content": " available experts. Specifically, we set " + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "inline_equation", + "content": "k = 1" + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "inline_equation", + "content": "E = 8" + }, + { + "bbox": [ + 55, + 605, + 295, + 714 + ], + "type": "text", + "content": ", as we find this configuration to work effectively. 
Additionally, we incorporate an auxiliary load-balancing loss [59] with a weight of 0.01 to ensure a balanced expert utilization." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 317, + 68, + 537, + 194 + ], + "blocks": [ + { + "bbox": [ + 317, + 68, + 537, + 194 + ], + "lines": [ + { + "bbox": [ + 317, + 68, + 537, + 194 + ], + "spans": [ + { + "bbox": [ + 317, + 68, + 537, + 194 + ], + "type": "image", + "image_path": "869818728d380a70e41fbd45b2de162b41240502115758a76d23bbc20a513422.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 195, + 553, + 228 + ], + "lines": [ + { + "bbox": [ + 313, + 195, + 553, + 228 + ], + "spans": [ + { + "bbox": [ + 313, + 195, + 553, + 228 + ], + "type": "text", + "content": "Figure 9. Scaling laws for sparse early-fusion NMMs. We report the final validation loss averaged across interleaved, image-captions and text data." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 232, + 553, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 232, + 553, + 258 + ], + "spans": [ + { + "bbox": [ + 313, + 232, + 553, + 258 + ], + "type": "text", + "content": "Following Abnar et al. [2], we compute training FLOPs as " + }, + { + "bbox": [ + 313, + 232, + 553, + 258 + ], + "type": "inline_equation", + "content": "6ND" + }, + { + "bbox": [ + 313, + 232, + 553, + 258 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 232, + 553, + 258 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 232, + 553, + 258 + ], + "type": "text", + "content": " represents the number of active parameters." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 264, + 545, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 264, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 313, + 264, + 545, + 277 + ], + "type": "text", + "content": "4.1. Sparse vs dense NMMs when scaling FLOPs" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 281, + 555, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 555, + 413 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 555, + 413 + ], + "type": "text", + "content": "We compare sparse MoE models to their dense counterparts by training models with different numbers of active parameters and varying amounts of training tokens. Figure 10 shows that, under the same inference cost (or number of active parameters), MoEs significantly outperform dense models. Interestingly, this performance gap is more pronounced for smaller model sizes. This suggests that MoEs enable models to handle heterogeneous data more effectively and specialize in different modalities. However, as dense models become sufficiently large, the gap between the two architectures gradually closes." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 420, + 538, + 433 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 420, + 538, + 433 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 538, + 433 + ], + "type": "text", + "content": "4.2. Scaling laws for sparse early-fusion models" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "text", + "content": "We train different models (ranging from 300M to 3.4B active parameters) on varying amounts of tokens (ranging from 250M to 600B) and report the final loss in Figure 9. 
We fit a power law to the convex hull of the lowest loss as a function of compute (FLOPs). Interestingly, the exponent " + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "inline_equation", + "content": "(-0.048)" + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "text", + "content": " is close to that of dense NMMs " + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "inline_equation", + "content": "(-0.049)" + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "text", + "content": ", indicating that both architectures scale similarly. However, the multiplicative constant is smaller for MoEs (27.086) compared to dense models (29.574), revealing lower loss. Additionally, MoEs require longer training to reach saturation compared to dense models (Appendix C for more details). We also predict the coefficients of Eq 1 by considering " + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "text", + "content": " as the number of active parameters. Table 2 shows significantly higher " + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "text", + "content": " compared to dense models. Interestingly, " + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "text", + "content": " is significantly higher than " + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 313, + 437, + 555, + 666 + ], + "type": "text", + "content": ", revealing that the training tokens should be scaled at a higher rate than the number of parameters when training sparse NMMs. 
We also experiment with a scaling law that takes into account the sparsity [2] and reached similar conclusions in Appendix C.7." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 672, + 550, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 672, + 550, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 672, + 550, + 685 + ], + "type": "text", + "content": "4.3. Modality-aware vs. Modality-agnostic routing" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "text", + "content": "Another alternative to MoEs is modality-aware routing, where multimodal tokens are assigned to experts based on" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 71, + 286, + 223 + ], + "blocks": [ + { + "bbox": [ + 59, + 71, + 286, + 223 + ], + "lines": [ + { + "bbox": [ + 59, + 71, + 286, + 223 + ], + "spans": [ + { + "bbox": [ + 59, + 71, + 286, + 223 + ], + "type": "image", + "image_path": "3608745d85340830d3714b391c6401e08ae575b7c2858fdf864af7cb9b124f87.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 232, + 295, + 275 + ], + "lines": [ + { + "bbox": [ + 55, + 232, + 295, + 275 + ], + "spans": [ + { + "bbox": [ + 55, + 232, + 295, + 275 + ], + "type": "text", + "content": "Figure 10. MoE vs Dense: scaling training FLOPs. 
We compare MoE and dense early-fusion models when scaling both the amount of training tokens and model sizes. MoEs beat dense models when matching the number of active parameters." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 280, + 296, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 280, + 296, + 364 + ], + "spans": [ + { + "bbox": [ + 55, + 280, + 296, + 364 + ], + "type": "text", + "content": "their modalities, similar to previous works [7, 75]. We train models with distinct image and text experts in the form of FFNs, where image tokens are processed only by the image FFN and text tokens only by the text FFN. Compared to modality-aware routing, MoEs exhibit significantly better performance on both image-caption and interleaved data as presented in Figure 11." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 391, + 296, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 391, + 296, + 403 + ], + "spans": [ + { + "bbox": [ + 55, + 391, + 296, + 403 + ], + "type": "text", + "content": "4.4. Emergence of expert specialization and sharing" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 415, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 415, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 415, + 296, + 713 + ], + "type": "text", + "content": "We investigate multimodal specialization in MoE architectures. In Figure 13, we visualize the normalized number of text and image tokens assigned to each expert across layers. 
To quantify this specialization, we compute a specialization score, defined as the average, across all experts within a layer, of " + }, + { + "bbox": [ + 55, + 415, + 296, + 713 + ], + "type": "inline_equation", + "content": "1 - H(p)" + }, + { + "bbox": [ + 55, + 415, + 296, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 55, + 415, + 296, + 713 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 55, + 415, + 296, + 713 + ], + "type": "text", + "content": " is the binary entropy of each expert's text/image token distribution. We plot this specialization score in Figure 12. Higher specialization scores indicate a tendency for experts to focus on either text or image tokens, while lower scores indicate a shared behavior. These visualizations provide clear evidence of modality-specific experts, particularly in the early layers. Furthermore, the specialization score decreases as the number of layers increases, before rising again in the last layers. This suggests that early and final layers exhibit higher modality specialization compared to mid-layers. This behavior is intuitive, as middle layers are expected to hold higher-level features that may generalize across modalities, and consistent with findings in [61] that shows increasing alignment between modalities across layers. The emergence of both expert specialization and cross-modality sharing in our modality-agnostic MoE, suggests it may be a preferable approach compared to modality-aware sparsity. All data displayed here is from an early-fusion MoE model with 1B active parameters trained for 300B tokens." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 317, + 72, + 553, + 128 + ], + "blocks": [ + { + "bbox": [ + 317, + 72, + 553, + 128 + ], + "lines": [ + { + "bbox": [ + 317, + 72, + 553, + 128 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 553, + 128 + ], + "type": "table", + "html": "
AccuracyCIDEr
AVGVQAv2TextVQAOKVQAGQAVizWizCOCOTextCaps
Late-fusion46.869.425.850.165.822.870.750.9
Early-fusion47.669.328.152.165.423.272.053.8
Early-MoEs48.269.830.052.165.423.669.655.7
", + "image_path": "ca37c4a7a1a0179f3629d000ae32bcaef96674560f551745f099a33c45e3a31c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 129, + 553, + 152 + ], + "lines": [ + { + "bbox": [ + 314, + 129, + 553, + 152 + ], + "spans": [ + { + "bbox": [ + 314, + 129, + 553, + 152 + ], + "type": "text", + "content": "Table 4. Supervised finetuning on the LLaVA mixture. All models are native at 1.5B scale and pre-trained on 300B tokens." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 316, + 163, + 436, + 281 + ], + "blocks": [ + { + "bbox": [ + 316, + 163, + 436, + 281 + ], + "lines": [ + { + "bbox": [ + 316, + 163, + 436, + 281 + ], + "spans": [ + { + "bbox": [ + 316, + 163, + 436, + 281 + ], + "type": "image", + "image_path": "df21bd1e747a0e8383b76d796688d703c2e2f383b9eba46b54ccd05bbe718f73.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 438, + 164, + 542, + 281 + ], + "blocks": [ + { + "bbox": [ + 438, + 164, + 542, + 281 + ], + "lines": [ + { + "bbox": [ + 438, + 164, + 542, + 281 + ], + "spans": [ + { + "bbox": [ + 438, + 164, + 542, + 281 + ], + "type": "image", + "image_path": "6f61e1f3f53e3ddd36f12868abec144cf60c2677463e6e9ba5cb4b0e8e6d8985.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 316, + 288, + 552, + 312 + ], + "blocks": [ + { + "bbox": [ + 316, + 288, + 552, + 312 + ], + "lines": [ + { + "bbox": [ + 316, + 288, + 552, + 312 + ], + "spans": [ + { + "bbox": [ + 316, + 288, + 552, + 312 + ], + "type": "image", + "image_path": "c3de713136a78894e21369ded443a32da1ce3d11b7bf70df27077caf95a41978.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 321, + 554, + 365 + ], + "lines": [ + { + "bbox": [ + 313, + 
321, + 554, + 365 + ], + "spans": [ + { + "bbox": [ + 313, + 321, + 554, + 365 + ], + "type": "text", + "content": "Figure 11. Modality-aware vs modality agnostic routing for sparse NMMs. We compare modality-agnostic routing with modality-aware routing when scaling both the amount of training tokens and model sizes." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 369, + 545, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 369, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 313, + 369, + 545, + 380 + ], + "type": "text", + "content": "5. Evaluation on downstream tasks with SFT" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 390, + 555, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 390, + 555, + 546 + ], + "spans": [ + { + "bbox": [ + 313, + 390, + 555, + 546 + ], + "type": "text", + "content": "Following previous work on scaling laws, we primarily rely on validation losses. However, we generally find that this evaluation correlates well with performance on downstream tasks. To validate this, we conduct a multimodal instruction tuning stage (SFT) on the LLaVA mixture [43] and report accuracy and CIDEr scores across several VQA and captioning tasks. Table 4 confirms the ranking of different model configurations. Specifically, early fusion outperforms late fusion, and MoEs outperform dense models. However, since the models are relatively small (1.5B scale), trained from scratch, and fine-tuned on a small dataset, the overall scores are lower than the current state of the art. Further implementation details can be found in Appendix A." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 560, + 398, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 560, + 398, + 572 + ], + "spans": [ + { + "bbox": [ + 313, + 560, + 398, + 572 + ], + "type": "text", + "content": "6. 
Related work" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "content": "Large multimodal models. A long-standing research goal has been to develop models capable of perceiving the world through multiple modalities, akin to human sensory experience. Recent progress in vision and language processing has shifted the research focus from smaller, task-specific models toward large, generalist models that can handle diverse inputs [29, 67]. Crucially, pre-trained vision and language backbones often require surprisingly little adaptation to enable effective cross-modal communication [32, 47, 62, 68, 69]. Simply integrating a vision encoder with either an encoder-decoder architecture [45, 48, 63, 72]" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 71, + 291, + 190 + ], + "blocks": [ + { + "bbox": [ + 63, + 71, + 291, + 190 + ], + "lines": [ + { + "bbox": [ + 63, + 71, + 291, + 190 + ], + "spans": [ + { + "bbox": [ + 63, + 71, + 291, + 190 + ], + "type": "image", + "image_path": "4a1035b9a18a960ba7e2cd7aca5a49f2e04a75b6df481835ae8d4417804fcb2d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 197, + 296, + 275 + ], + "lines": [ + { + "bbox": [ + 55, + 197, + 296, + 275 + ], + "spans": [ + { + "bbox": [ + 55, + 197, + 296, + 275 + ], + "type": "text", + "content": "Figure 12. MoE specialization score. 
Entropy-based image/text specialization score (as described in § 4.4) across layers for two data sources: HQITP and Obelics. HQITP has a more imbalanced image-to-text token distribution, resulting in generally higher specialization. Despite this difference, both data sources exhibit a similar trend: the specialization score decreases in the early layers before increasing again in the final layers." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 277, + 295, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 277, + 295, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 295, + 422 + ], + "type": "text", + "content": "or a decoder-only LLM has yielded highly capable multimodal systems [1, 6, 9, 13, 16, 35, 43, 49, 64, 73, 78, 83]. This late-fusion approach, where modalities are processed separately before being combined, is now well-understood, with established best practices for training effective models [34, 41, 46, 81]. In contrast, early-fusion models [8, 18, 66], which combine modalities at an earlier stage, remain relatively unexplored, with only a limited number of publicly released models [8, 18]. Unlike [18, 66], our models utilize only a single linear layer and rely exclusively on a next-token prediction loss. Furthermore, we train our models from scratch on all modalities without image tokenization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 424, + 295, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 424, + 295, + 568 + ], + "spans": [ + { + "bbox": [ + 55, + 424, + 295, + 568 + ], + "type": "text", + "content": "Native Multimodal Models. We define native multimodal models as those trained from scratch on all modalities simultaneously [67] rather than adapting LLMs to accommodate additional modalities. 
Due to the high cost of training such models, they remain relatively underexplored, with most relying on late-fusion architectures [27, 79]. Some multimodal models trained from scratch [4, 66, 76] relax this constraint by utilizing pre-trained image tokenizers such as [20, 70] to convert images into discrete tokens, integrating them into the text vocabulary. This approach enables models to understand and generate text and images, facilitating a more seamless multimodal learning process." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 570, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 295, + 714 + ], + "type": "text", + "content": "Scaling laws. Scaling law studies aim to predict how model performance scales with training compute. Early works [26, 31] found that LLM performance follows a power-law relationship with compute, enabling the compute-optimal estimation of the number of model parameters and training tokens at scale for a given budget. Similar research has extended these findings to sparse Mixture of Experts (MoE) models, considering factors such as sparsity, number of experts, and routing granularity [15, 33, 74]. Scaling laws have also been observed across various domains, including image models [23], video models [56], protein LLMs [14], and imitation learning [54]. 
However, few stud" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 316, + 71, + 397, + 137 + ], + "blocks": [ + { + "bbox": [ + 316, + 71, + 397, + 137 + ], + "lines": [ + { + "bbox": [ + 316, + 71, + 397, + 137 + ], + "spans": [ + { + "bbox": [ + 316, + 71, + 397, + 137 + ], + "type": "image", + "image_path": "f3ebdfb256272f381112555508c27704cd3e60fe5c08b203fb55b9fc64a1a634.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 139, + 555, + 183 + ], + "lines": [ + { + "bbox": [ + 313, + 139, + 555, + 183 + ], + "spans": [ + { + "bbox": [ + 313, + 139, + 555, + 183 + ], + "type": "text", + "content": "Figure 13. MoE specialization frequency. Percentage of text and image tokens routed to each expert on interleaved data from Obelics. Experts are ordered for better visualization. The first layer shows the highest amount of unimodal experts." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 400, + 71, + 474, + 137 + ], + "blocks": [ + { + "bbox": [ + 400, + 71, + 474, + 137 + ], + "lines": [ + { + "bbox": [ + 400, + 71, + 474, + 137 + ], + "spans": [ + { + "bbox": [ + 400, + 71, + 474, + 137 + ], + "type": "image", + "image_path": "21e8fdc385d8e3156ac6fa8c201c6a66e47d61ac4cf92ccd51951f6d42c2f9ef.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 477, + 71, + 549, + 137 + ], + "blocks": [ + { + "bbox": [ + 477, + 71, + 549, + 137 + ], + "lines": [ + { + "bbox": [ + 477, + 71, + 549, + 137 + ], + "spans": [ + { + "bbox": [ + 477, + 71, + 549, + 137 + ], + "type": "image", + "image_path": "5f8f59f429c09150c5fcb8b66558a02dcabcfe38634d5ce492da13a607485231.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 187, + 555, + 258 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 313, + 187, + 555, + 258 + ], + "spans": [ + { + "bbox": [ + 313, + 187, + 555, + 258 + ], + "type": "text", + "content": "ies have investigated scaling laws for multimodal models. Notably, Aghajanyan et al. [5] examined multimodal models that tokenize modalities into discrete tokens and include multimodal generation. In contrast, we focus on studying early-fusion models that take raw multimodal inputs and are trained on interleaved multimodal data." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 264, + 556, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 264, + 556, + 372 + ], + "spans": [ + { + "bbox": [ + 313, + 264, + 556, + 372 + ], + "type": "text", + "content": "Mixture of experts (MoEs). MoEs [59] scale model capacity efficiently by sparsely activating parameters, enabling large models with reduced per-sample compute. While widely studied in LLMs [22, 30, 36, 37, 42, 65, 77, 84], MoEs remain underexplored in multimodal settings. Prior work has examined contrastive models [50], late-fusion LLMs [38, 40], and modality-specific experts [7, 12, 60]. We focus on analyzing MoEs in early-fusion multimodal models." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 398, + 389, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 398, + 389, + 410 + ], + "spans": [ + { + "bbox": [ + 314, + 398, + 389, + 410 + ], + "type": "text", + "content": "7. Limitations" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 423, + 556, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 423, + 556, + 533 + ], + "spans": [ + { + "bbox": [ + 313, + 423, + 556, + 533 + ], + "type": "text", + "content": "Our study finds that scaling law coefficients are broadly consistent across training mixtures, though a broader exploration is needed to validate this observation. 
While validation loss scales predictably with compute, the extent to which this correlates with downstream performance remains unclear and warrants further investigation. The accuracy of scaling law predictions improves with higher FLOPs, but their extrapolation to extreme model sizes is still an open question (Appendix D for more details)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 556, + 388, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 556, + 388, + 569 + ], + "spans": [ + { + "bbox": [ + 313, + 556, + 388, + 569 + ], + "type": "text", + "content": "8. Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 582, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 556, + 715 + ], + "type": "text", + "content": "We explore various strategies for compute-optimal pretraining of native multimodal models. We found the NMMs follow similar scaling laws to those of LLMs. Contrary to common belief, we find no inherent advantage in adopting late-fusion architectures over early-fusion ones. While both architectures exhibit similar scaling properties, early-fusion models are more efficient to train and outperform late-fusion models at lower compute budgets. Furthermore, we show that sparse architectures encourage modality-specific specialization, leading to performance improvements while maintaining the same inference cost." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 147, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 147, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 147, + 85 + ], + "type": "text", + "content": "Acknowledgment" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 296, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 296, + 175 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 296, + 175 + ], + "type": "text", + "content": "We thank Philipp Dufter, Samira Abnar, Xiujun Li, Zhe Gan, Alexander Toshev, Yinfei Yang, Dan Busbridge, and Jason Ramapuram for many fruitful discussions. We thank Denise Hui, and Samy Bengio for infra and compute support. Finally, we thank, Louis Bethune, Pierre Ablin, Marco Cuturi, and the MLR team at Apple for their support throughout the project." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 188, + 115, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 188, + 115, + 201 + ], + "spans": [ + { + "bbox": [ + 56, + 188, + 115, + 201 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 208, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 61, + 208, + 296, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 208, + 296, + 262 + ], + "spans": [ + { + "bbox": [ + 61, + 208, + 296, + 262 + ], + "type": "text", + "content": "[1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024. 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 264, + 296, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 264, + 296, + 319 + ], + "spans": [ + { + "bbox": [ + 62, + 264, + 296, + 319 + ], + "type": "text", + "content": "[2] Samira Abnar, Harshay Shah, Dan Busbridge, Alaaeldin Mohamed Elnouby Ali, Josh Susskind, and Vimal Thilak. Parameters vs flops: Scaling laws for optimal sparsity for mixture-of-experts language models. arXiv preprint arXiv:2501.12370, 2025. 2, 3, 6, 18, 20" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 321, + 296, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 321, + 296, + 374 + ], + "spans": [ + { + "bbox": [ + 62, + 321, + 296, + 374 + ], + "type": "text", + "content": "[3] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 
1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 376, + 296, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 376, + 296, + 431 + ], + "spans": [ + { + "bbox": [ + 62, + 376, + 296, + 431 + ], + "type": "text", + "content": "[4] Armen Aghajanyan, Bernie Huang, Candace Ross, Vladimir Karpukhin, Hu Xu, Naman Goyal, Dmytro Okhonko, Mandar Joshi, Gargi Ghosh, Mike Lewis, et al. Cm3: A causal masked multimodal model of the internet. arXiv preprint arXiv:2201.07520, 2022. 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 434, + 296, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 434, + 296, + 498 + ], + "spans": [ + { + "bbox": [ + 62, + 434, + 296, + 498 + ], + "type": "text", + "content": "[5] Armen Aghajanyan, Lili Yu, Alexis Conneau, Wei-Ning Hsu, Karen Hambardzumyan, Susan Zhang, Stephen Roller, Naman Goyal, Omer Levy, and Luke Zettlemoyer. Scaling laws for generative mixed-modal language models. In International Conference on Machine Learning, pages 265-279. PMLR, 2023. 3, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 501, + 296, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 501, + 296, + 566 + ], + "spans": [ + { + "bbox": [ + 62, + 501, + 296, + 566 + ], + "type": "text", + "content": "[6] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. 
1, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 567, + 296, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 567, + 296, + 622 + ], + "spans": [ + { + "bbox": [ + 62, + 567, + 296, + 622 + ], + "type": "text", + "content": "[7] Hangbo Bao, Wenhui Wang, Li Dong, Qiang Liu, Owais Khan Mohammed, Kriti Aggarwal, Subhojit Som, and Furu Wei. Vlmo: Unified vision-language pretraining with mixture-of-modality-experts. arXiv preprint arXiv:2111.02358, 2021. 7, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 624, + 296, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 624, + 296, + 657 + ], + "spans": [ + { + "bbox": [ + 62, + 624, + 296, + 657 + ], + "type": "text", + "content": "[8] Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, and Săgnak Taşirlar. Introducing our multimodal models, 2023. 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 658, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 658, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 62, + 658, + 296, + 713 + ], + "type": "text", + "content": "[9] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024. 
1, 8" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 128 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 128 + ], + "type": "text", + "content": "[10] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3, 4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 129, + 553, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 129, + 553, + 172 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 553, + 172 + ], + "type": "text", + "content": "[11] Minwoo Byeon, Beomhee Park, Haecheon Kim, Sungjun Lee, Woonhyuk Baek, and Saehoon Kim. Coyo-700m: Image-text pair dataset. https://github.com/kakaobrain/coyo-dataset, 2022.3, 13" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 173, + 553, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 173, + 553, + 228 + ], + "spans": [ + { + "bbox": [ + 317, + 173, + 553, + 228 + ], + "type": "text", + "content": "[12] Junyi Chen, Longteng Guo, Jia Sun, Shuai Shao, Zehuan Yuan, Liang Lin, and Dongyu Zhang. Eve: Efficient vision-language pre-training with masked prediction and modality-aware moe. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1110-1119, 2024. 
8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 228, + 553, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 228, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 317, + 228, + 553, + 293 + ], + "type": "text", + "content": "[13] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 294, + 553, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 294, + 553, + 326 + ], + "spans": [ + { + "bbox": [ + 317, + 294, + 553, + 326 + ], + "type": "text", + "content": "[14] Xingyi Cheng, Bo Chen, Pan Li, Jing Gong, Jie Tang, and Le Song. Training compute-optimal protein language models. bioRxiv, 2024. 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 327, + 553, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 327, + 553, + 392 + ], + "spans": [ + { + "bbox": [ + 317, + 327, + 553, + 392 + ], + "type": "text", + "content": "[15] Aidan Clark, Diego de Las Casas, Aurelia Guy, Arthur Mensch, Michela Paganini, Jordan Hoffmann, Bogdan Damoc, Blake Hechtman, Trevor Cai, Sebastian Borgeaud, et al. Unified scaling laws for routed language models. In International conference on machine learning, pages 4057-4086. PMLR, 2022. 
8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 394, + 553, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 394, + 553, + 448 + ], + "spans": [ + { + "bbox": [ + 317, + 394, + 553, + 448 + ], + "type": "text", + "content": "[16] Wenliang Dai, Nayeon Lee, Boxin Wang, Zhuolin Yang, Zihan Liu, Jon Barker, Tuomas Rintamaki, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. NvIm: Open frontier-class multimodal llms. arXiv preprint arXiv:2409.11402, 2024.8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 449, + 553, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 449, + 553, + 514 + ], + "spans": [ + { + "bbox": [ + 317, + 449, + 553, + 514 + ], + "type": "text", + "content": "[17] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdul-mohsin, et al. Scaling vision transformers to 22 billion parameters. In International Conference on Machine Learning, pages 7480-7512. PMLR, 2023. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 515, + 553, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 515, + 553, + 557 + ], + "spans": [ + { + "bbox": [ + 317, + 515, + 553, + 557 + ], + "type": "text", + "content": "[18] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. arXiv preprint arXiv:2406.11832, 2024.8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 559, + 553, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 559, + 553, + 613 + ], + "spans": [ + { + "bbox": [ + 317, + 559, + 553, + 613 + ], + "type": "text", + "content": "[19] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 
The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 614, + 553, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 614, + 553, + 668 + ], + "spans": [ + { + "bbox": [ + 317, + 614, + 553, + 668 + ], + "type": "text", + "content": "[20] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12873-12883, 2021. 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "type": "text", + "content": "[21] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. arXiv preprint arXiv:2309.17425, 2023. 3, 6, 13" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "text", + "content": "[22] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. 
Journal of Machine Learning Research, 23(120):1-39, 2022. 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 194 + ], + "type": "text", + "content": "[23] Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Béthune, Zhe Gan, Alexander T Toshev, Marcin Eichner, Moin Nabi, Yinfei Yang, Joshua M. Susskind, and Alaaeldin El-Nouby. Multimodal autoregressive pre-training of large vision encoders, 2024. 1, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 197, + 294, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 197, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 197, + 294, + 239 + ], + "type": "text", + "content": "[24] Trevor Gale, Deepak Narayanan, Cliff Young, and Matei Zaharia. Megablocks: Efficient sparse training with mixture-of-experts. Proceedings of Machine Learning and Systems, 5:288-304, 2023. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 242, + 294, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 242, + 294, + 285 + ], + "spans": [ + { + "bbox": [ + 56, + 242, + 294, + 285 + ], + "type": "text", + "content": "[25] Alexander Hagele, Elie Bakouch, Atli Kosson, Loubna Ben Allal, Leandro Von Werra, and Martin Jaggi. Scaling laws and compute-optimal training beyond fixed training durations. arXiv preprint arXiv:2405.18392, 2024. 
3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 287, + 294, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 287, + 294, + 363 + ], + "spans": [ + { + "bbox": [ + 56, + 287, + 294, + 363 + ], + "type": "text", + "content": "[26] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. In Proceedings of the 36th International Conference on Neural Information Processing Systems, pages 30016-30030, 2022. 2, 3, 4, 8, 17" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 365, + 294, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 294, + 430 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 294, + 430 + ], + "type": "text", + "content": "[27] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Barun Patra, et al. Language is not all you need: Aligning perception with language models. Advances in Neural Information Processing Systems, 36:72096-72109, 2023. 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 432, + 294, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 432, + 294, + 454 + ], + "spans": [ + { + "bbox": [ + 56, + 432, + 294, + 454 + ], + "type": "text", + "content": "[28] Peter J. Huber. Robust Estimation of a Location Parameter, pages 492-518. Springer New York, New York, NY, 1992. 
3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 456, + 294, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 456, + 294, + 498 + ], + "spans": [ + { + "bbox": [ + 56, + 456, + 294, + 498 + ], + "type": "text", + "content": "[29] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 501, + 294, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 501, + 294, + 555 + ], + "spans": [ + { + "bbox": [ + 56, + 501, + 294, + 555 + ], + "type": "text", + "content": "[30] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 557, + 294, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 557, + 294, + 610 + ], + "spans": [ + { + "bbox": [ + 56, + 557, + 294, + 610 + ], + "type": "text", + "content": "[31] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 2, 8, 15" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 613, + 294, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 613, + 294, + 657 + ], + "spans": [ + { + "bbox": [ + 56, + 613, + 294, + 657 + ], + "type": "text", + "content": "[32] Jing Yu Koh, Ruslan Salakhutdinov, and Daniel Fried. Grounding language models to images for multimodal inputs and outputs. In International Conference on Machine Learning, pages 17283-17300. PMLR, 2023. 
7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 658, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 658, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 658, + 294, + 713 + ], + "type": "text", + "content": "[33] Jakub Krajewski, Jan Ludziejewski, Kamil Adamczewski, Maciej Pioro, Michal Krutul, Szymon Antoniak, Kamil Ciebiera, Krystian Król, Tomasz Odrzygoźdź, Piotr Sankowski, et al. Scaling laws for fine-grained mixture of experts. arXiv preprint arXiv:2402.07871, 2024. 8, 18" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 316, + 72, + 553, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 139 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 139 + ], + "type": "text", + "content": "[34] Hugo Laurencon, Lucile Saulnier, Léo Tronchon, Stas Bekman, Amanpreet Singh, Anton Lozhkov, Thomas Wang, Siddharth Karamcheti, Alexander Rush, Douwe Kiela, et al. Obelics: An open web-scale filtered dataset of interleaved image-text documents. Advances in Neural Information Processing Systems, 36, 2024. 3, 5, 8, 13" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 141, + 553, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 141, + 553, + 173 + ], + "spans": [ + { + "bbox": [ + 316, + 141, + 553, + 173 + ], + "type": "text", + "content": "[35] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models? arXiv preprint arXiv:2405.02246, 2024. 
1, 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 175, + 553, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 553, + 229 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 553, + 229 + ], + "type": "text", + "content": "[36] Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan First, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. Gshard: Scaling giant models with conditional computation and automatic sharding. arXiv preprint arXiv:2006.16668, 2020. 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 231, + 553, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 231, + 553, + 274 + ], + "spans": [ + { + "bbox": [ + 316, + 231, + 553, + 274 + ], + "type": "text", + "content": "[37] Mike Lewis, Shruti Bhosale, Tim Dettmers, Naman Goyal, and Luke Zettlemoyer. Base layers: Simplifying training of large, sparse models. In International Conference on Machine Learning, pages 6265-6274. PMLR, 2021. 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 276, + 553, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 276, + 553, + 319 + ], + "spans": [ + { + "bbox": [ + 316, + 276, + 553, + 319 + ], + "type": "text", + "content": "[38] Dongxu Li, Yudong Liu, Haoning Wu, Yue Wang, Zhiqi Shen, Bowen Qu, Xinyao Niu, Guoyin Wang, Bei Chen, and Junnan Li. Aria: An open multimodal native mixture-of-experts model. arXiv preprint arXiv:2410.05993, 2024. 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 321, + 553, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 321, + 553, + 375 + ], + "spans": [ + { + "bbox": [ + 316, + 321, + 553, + 375 + ], + "type": "text", + "content": "[39] Jeffrey Li, Alex Fang, Georgios Smyrnis, Maor Ivgi, Matt Jordan, Samir Gadre, Hritik Bansal, Etash Guha, Sedrick Keh, Kushal Arora, et al. 
Datacomp-lm: In search of the next generation of training sets for language models. arXiv preprint arXiv:2406.11794, 2024. 3, 13, 15" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 376, + 553, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 376, + 553, + 421 + ], + "spans": [ + { + "bbox": [ + 316, + 376, + 553, + 421 + ], + "type": "text", + "content": "[40] Bin Lin, Zhenyu Tang, Yang Ye, Jiaxi Cui, Bin Zhu, Peng Jin, Junwu Zhang, Munan Ning, and Li Yuan. Moe-llava: Mixture of experts for large vision-language models. arXiv preprint arXiv:2401.15947, 2024. 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 422, + 553, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 422, + 553, + 475 + ], + "spans": [ + { + "bbox": [ + 316, + 422, + 553, + 475 + ], + "type": "text", + "content": "[41] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. 5, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 478, + 553, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 478, + 553, + 521 + ], + "spans": [ + { + "bbox": [ + 316, + 478, + 553, + 521 + ], + "type": "text", + "content": "[42] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 523, + 553, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 523, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 523, + 553, + 567 + ], + "type": "text", + "content": "[43] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. 
Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 1, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 568, + 553, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 568, + 553, + 590 + ], + "spans": [ + { + "bbox": [ + 316, + 568, + 553, + 590 + ], + "type": "text", + "content": "[44] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 13" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 591, + 553, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 591, + 553, + 645 + ], + "spans": [ + { + "bbox": [ + 316, + 591, + 553, + 645 + ], + "type": "text", + "content": "[45] Jiasen Lu, Christopher Clark, Rowan Zellers, Roozbeh Mottaghi, and Aniruddha Kembhavi. Unified-io: A unified model for vision, language, and multi-modal tasks. In The Eleventh International Conference on Learning Representations, 2022. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 647, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 647, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 647, + 553, + 713 + ], + "type": "text", + "content": "[46] Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Duffer, Dhruti Shah, Xianzhi Du, Futang Peng, Anton Belyi, et al. Mm1: methods, analysis and insights from multimodal llm pre-training. In European Conference on Computer Vision, pages 304–323. Springer, 2025. 
5, 8, 13" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 733, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 733, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 733, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 115 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 115 + ], + "type": "text", + "content": "[47] Jack Merullo, Louis Castricato, Carsten Eickhoff, and Ellie Pavlick. Linearly mapping from image to text space. In *The Eleventh International Conference on Learning Representations*, 2023. 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 172 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 172 + ], + "type": "text", + "content": "[48] David Mizrahi, Roman Bachmann, Oguzhan Kar, Teresa Yeo, Mingfei Gao, Afshin Dehghan, and Amir Zamir. 4m: Massively multimodal masked modeling. Advances in Neural Information Processing Systems, 36:58363-58408, 2023. 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 175, + 294, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 294, + 251 + ], + "type": "text", + "content": "[49] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Tushar Nagarajan, Matt Smith, Shashank Jain, Chun-Fu Yeh, Prakash Murugesan, Peyman Heidari, Yue Liu, et al. 
Anymal: An efficient and scalable any-modality augmented language model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1314-1332, 2024. 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 252, + 294, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 252, + 294, + 306 + ], + "spans": [ + { + "bbox": [ + 56, + 252, + 294, + 306 + ], + "type": "text", + "content": "[50] Basil Mustafa, Carlos Riquelme, Joan Puigcerver, Rodolphe Jenatton, and Neil Houlsby. Multimodal contrastive learning with limoe: the language-image mixture of experts. Advances in Neural Information Processing Systems, 35:9564-9576, 2022. 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 308, + 294, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 308, + 294, + 340 + ], + "spans": [ + { + "bbox": [ + 56, + 308, + 294, + 340 + ], + "type": "text", + "content": "[51] Jorge Nocedal. Updating quasi newton matrices with limited storage. Mathematics of Computation, 35(151):951-958, 1980. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 342, + 294, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 342, + 294, + 397 + ], + "spans": [ + { + "bbox": [ + 56, + 342, + 294, + 397 + ], + "type": "text", + "content": "[52] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 399, + 294, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 399, + 294, + 430 + ], + "spans": [ + { + "bbox": [ + 56, + 399, + 294, + 430 + ], + "type": "text", + "content": "[53] Tim Pearce and Jinyeop Song. 
Reconciling kaplan and chinchilla scaling laws. arXiv preprint arXiv:2406.12907, 2024. 15" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 433, + 294, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 433, + 294, + 476 + ], + "spans": [ + { + "bbox": [ + 56, + 433, + 294, + 476 + ], + "type": "text", + "content": "[54] Tim Pearce, Tabish Rashid, Dave Bignell, Raluca Georgescu, Sam Devlin, and Katja Hofmann. Scaling laws for pre-training agents and world models. arXiv preprint arXiv:2411.04434, 2024. 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 478, + 294, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 478, + 294, + 543 + ], + "spans": [ + { + "bbox": [ + 56, + 478, + 294, + 543 + ], + "type": "text", + "content": "[55] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 1, 3, 15" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 544, + 294, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 544, + 294, + 599 + ], + "spans": [ + { + "bbox": [ + 56, + 544, + 294, + 599 + ], + "type": "text", + "content": "[56] Jathushan Rajasegaran, Ilija Radosavovic, Rahul Ravishankar, Yossi Gandelsman, Christoph Feichtenhofer, and Jitendra Malik. An empirical study of autoregressive pretraining from videos. 
arXiv preprint arXiv:2501.05453, 2025.8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 601, + 294, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 601, + 294, + 656 + ], + "spans": [ + { + "bbox": [ + 56, + 601, + 294, + 656 + ], + "type": "text", + "content": "[57] Kanchana Ranasinghe, Brandon McKinzie, Sachin Ravi, Yinfei Yang, Alexander Toshev, and Jonathon Shlens. Perceptual grouping in contrastive vision-language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5571-5584, 2023. 13" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 658, + 294, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 658, + 294, + 678 + ], + "spans": [ + { + "bbox": [ + 56, + 658, + 294, + 678 + ], + "type": "text", + "content": "[58] Noam Shazeer. Glu variants improve transformer. arXiv preprint arXiv:2002.05202, 2020. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 680, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 680, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 680, + 294, + 713 + ], + "type": "text", + "content": "[59] Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 94 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 94 + ], + "type": "text", + "content": "of-experts layer. arXiv preprint arXiv:1701.06538, 2017. 
2, 6, 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 95, + 553, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 95, + 553, + 148 + ], + "spans": [ + { + "bbox": [ + 316, + 95, + 553, + 148 + ], + "type": "text", + "content": "[60] Sheng Shen, Zhewei Yao, Chunyuan Li, Trevor Darrell, Kurt Keutzer, and Yuxiong He. Scaling vision-language models with sparse mixture of experts. In The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 150, + 553, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 150, + 553, + 194 + ], + "spans": [ + { + "bbox": [ + 316, + 150, + 553, + 194 + ], + "type": "text", + "content": "[61] Mustafa Shukor and Matthieu Cord. Implicit multimodal alignment: On the generalization of frozen llms to multimodal inputs. Advances in Neural Information Processing Systems, 37:130848-130886, 2024. 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 195, + 553, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 195, + 553, + 239 + ], + "spans": [ + { + "bbox": [ + 316, + 195, + 553, + 239 + ], + "type": "text", + "content": "[62] Mustafa Shukor, Corentin Dancette, and Matthieu Cord. eplalm: Efficient perceptual augmentation of language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22056-22069, 2023. 1, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 240, + 553, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 240, + 553, + 282 + ], + "spans": [ + { + "bbox": [ + 316, + 240, + 553, + 282 + ], + "type": "text", + "content": "[63] Mustafa Shukor, Corentin Dancette, Alexandre Rame, and Matthieu Cord. Unival: Unified model for image, video, audio and language tasks. Transactions on Machine Learning Research Journal, 2023. 
7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 284, + 553, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 284, + 553, + 348 + ], + "spans": [ + { + "bbox": [ + 316, + 284, + 553, + 348 + ], + "type": "text", + "content": "[64] Mustafa Shukor, Dana Aubakirova, Francesco Capuano, Pepijn Kooijmans, Steven Palma, Adil Zoutine, Michel Ar-actingi, Caroline Pascal, Martino Russi, Andres Marafioti, et al. Smolvla: A vision-language-action model for affordable and efficient robotics. arXiv preprint arXiv:2506.01844, 2025. 1, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 350, + 553, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 350, + 553, + 403 + ], + "spans": [ + { + "bbox": [ + 316, + 350, + 553, + 403 + ], + "type": "text", + "content": "[65] Xingwu Sun, Yanfeng Chen, Yiqing Huang, Ruobing Xie, Jiaqi Zhu, Kai Zhang, Shuaipeng Li, Zhen Yang, Jonny Han, Xiaobo Shu, et al. Hunyuan-large: An open-source moe model with 52 billion activated parameters by tencent. arXiv preprint arXiv:2411.02265, 2024. 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 404, + 553, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 404, + 553, + 435 + ], + "spans": [ + { + "bbox": [ + 316, + 404, + 553, + 435 + ], + "type": "text", + "content": "[66] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 437, + 553, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 437, + 553, + 491 + ], + "spans": [ + { + "bbox": [ + 316, + 437, + 553, + 491 + ], + "type": "text", + "content": "[67] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. 
Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 493, + 553, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 493, + 553, + 536 + ], + "spans": [ + { + "bbox": [ + 316, + 493, + 553, + 536 + ], + "type": "text", + "content": "[68] Maria Tsimpoukelli, Jacob L Menick, Serkan Cabi, SM Eslami, Oriol Vinyals, and Felix Hill. Multimodal few-shot learning with frozen language models. Advances in Neural Information Processing Systems, 34:200-212, 2021. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 537, + 553, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 537, + 553, + 580 + ], + "spans": [ + { + "bbox": [ + 316, + 537, + 553, + 580 + ], + "type": "text", + "content": "[69] Théophane Vallaeys, Mustafa Shukor, Matthieu Cord, and Jakob Verbeek. Improved baselines for data-efficient perceptual augmentation of llms. arXiv preprint arXiv:2403.13499, 2024. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 582, + 553, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 582, + 553, + 624 + ], + "spans": [ + { + "bbox": [ + 316, + 582, + 553, + 624 + ], + "type": "text", + "content": "[70] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2017. 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 625, + 553, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 625, + 553, + 647 + ], + "spans": [ + { + "bbox": [ + 316, + 625, + 553, + 647 + ], + "type": "text", + "content": "[71] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. 
3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 647, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 647, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 647, + 553, + 713 + ], + "type": "text", + "content": "[72] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International conference on machine learning, pages 23318-23340. PMLR, 2022. 7" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 294, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 294, + 127 + ], + "type": "text", + "content": "[73] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 
1, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 128, + 294, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 128, + 294, + 205 + ], + "spans": [ + { + "bbox": [ + 56, + 128, + 294, + 205 + ], + "type": "text", + "content": "[74] Siqi Wang, Zhengyu Chen, Bei Li, Keqing He, Min Zhang, and Jingang Wang. Scaling laws across model architectures: A comparative analysis of dense and MoE models in large language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 5583-5595, Miami, Florida, USA, 2024. Association for Computational Linguistics. 8, 18" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 205, + 294, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 205, + 294, + 259 + ], + "spans": [ + { + "bbox": [ + 56, + 205, + 294, + 259 + ], + "type": "text", + "content": "[75] Wenhui Wang, Hangbo Bao, Li Dong, Johan Bjorck, Zhiliang Peng, Qiang Liu, Kriti Aggarwal, Owais Khan Mohammed, Saksham Singhal, Subhojit Som, et al. Image as a foreign language: Beit pretraining for all vision and vision-language tasks. arXiv preprint arXiv:2208.10442, 2022. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 261, + 294, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 261, + 294, + 304 + ], + "spans": [ + { + "bbox": [ + 56, + 261, + 294, + 304 + ], + "type": "text", + "content": "[76] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 
8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 305, + 294, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 305, + 294, + 359 + ], + "spans": [ + { + "bbox": [ + 56, + 305, + 294, + 359 + ], + "type": "text", + "content": "[77] Tianwen Wei, Bo Zhu, Liang Zhao, Cheng Cheng, Biye Li, Weiwei Lu, Peng Cheng, Jianhao Zhang, Xiaoyu Zhang, Liang Zeng, et al. Skywork-moe: A deep dive into training techniques for mixture-of-experts language models. arXiv preprint arXiv:2406.06563, 2024.8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 360, + 294, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 360, + 294, + 414 + ], + "spans": [ + { + "bbox": [ + 56, + 360, + 294, + 414 + ], + "type": "text", + "content": "[78] Le Xue, Manli Shu, Anas Awadalla, Jun Wang, An Yan, Senthil Purushwalkam, Honglu Zhou, Viraj Prabhu, Yutong Dai, Michael S Ryoo, et al. xgen-mm (blip-3): A family of open large multimodal models. arXiv preprint arXiv:2408.08872, 2024. 1, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 415, + 294, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 415, + 294, + 458 + ], + "spans": [ + { + "bbox": [ + 56, + 415, + 294, + 458 + ], + "type": "text", + "content": "[79] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 460, + 294, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 460, + 294, + 502 + ], + "spans": [ + { + "bbox": [ + 56, + 460, + 294, + 502 + ], + "type": "text", + "content": "[80] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 504, + 294, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 504, + 294, + 557 + ], + "spans": [ + { + "bbox": [ + 56, + 504, + 294, + 557 + ], + "type": "text", + "content": "[81] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Duffer, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1. 5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 5, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 559, + 294, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 559, + 294, + 612 + ], + "spans": [ + { + "bbox": [ + 56, + 559, + 294, + 612 + ], + "type": "text", + "content": "[82] Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 614, + 294, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 614, + 294, + 667 + ], + "spans": [ + { + "bbox": [ + 56, + 614, + 294, + 667 + ], + "type": "text", + "content": "[83] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. In The Twelfth International Conference on Learning Representations, 2024. 
8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 669, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 669, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 669, + 294, + 712 + ], + "type": "text", + "content": "[84] Barret Zoph, Irwan Bello, Sameer Kumar, Nan Du, Yanping Huang, Jeff Dean, Noam Shazeer, and William Fedus. St-moe: Designing stable and transferable sparse expert models. arXiv preprint arXiv:2202.08906, 2022. 8" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 168, + 68, + 442, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 68, + 442, + 110 + ], + "spans": [ + { + "bbox": [ + 168, + 68, + 442, + 110 + ], + "type": "text", + "content": "Scaling Laws for Native Multimodal Models Supplementary Material" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 124, + 269, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 124, + 269, + 137 + ], + "spans": [ + { + "bbox": [ + 55, + 124, + 269, + 137 + ], + "type": "text", + "content": "This supplementary material is organized as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 155, + 295, + 274 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 55, + 155, + 295, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 155, + 295, + 178 + ], + "spans": [ + { + "bbox": [ + 55, + 155, + 295, + 178 + ], + "type": "text", + "content": "- Appendix A: contains the implementation details and 
the hyperparameters used to train our models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 178, + 295, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 178, + 295, + 201 + ], + "spans": [ + { + "bbox": [ + 55, + 178, + 295, + 201 + ], + "type": "text", + "content": "- Appendix B: contains detailed comparison between early and late fusion models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 203, + 295, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 203, + 295, + 225 + ], + "spans": [ + { + "bbox": [ + 56, + 203, + 295, + 225 + ], + "type": "text", + "content": "- Appendix C: contains more details about scaling laws derivation, evaluation and additional results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 227, + 295, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 227, + 295, + 249 + ], + "spans": [ + { + "bbox": [ + 56, + 227, + 295, + 249 + ], + "type": "text", + "content": "- Appendix D: contains discussion about the paper limitations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 251, + 295, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 251, + 295, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 251, + 295, + 274 + ], + "type": "text", + "content": "- Appendix E: contains more results about MoEs and modality specialization." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 313, + 173, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 313, + 173, + 327 + ], + "spans": [ + { + "bbox": [ + 55, + 313, + 173, + 327 + ], + "type": "text", + "content": "A. 
Experimental setup" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "spans": [ + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "text", + "content": "In Table 6, we show the pre-training hyperparameters for different model configurations used to derive the scaling laws. The number of parameters ranges from 275M to 3.7B, with model width increasing accordingly, while the depth remains fixed at 24 layers. Learning rates vary by model size, decreasing as the model scales up. Based on empirical experiments and estimates similar to [46], we found these values to be effective in our setup. Training is optimized using a fully decoupled AdamW optimizer with momentum values " + }, + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.95" + }, + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "text", + "content": ", and a weight decay of " + }, + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "inline_equation", + "content": "1\\mathrm{e} - 4" + }, + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "text", + "content": ". The batch size is set to 2k samples, which account for 2M tokens, given 1k context length. Gradient clipping is set to 1.0, with a maximum warmup duration of 5k iterations, adjusted for shorter training runs: 1k and 2.5k warmup steps for models trained between 1k-4k and 5k-15k steps, respectively. For MoEs, we found that longer warmup is significantly better, so we adopt a 2.5k warmup for all runs under 20k steps. 
We use a constant learning rate schedule with cooldown during the final " + }, + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 55, + 342, + 296, + 712 + ], + "type": "text", + "content": " of training, gradually reducing to zero following an inverse square root schedule. For vision processing, image inputs are divided into (14, 14) patches, with augmentations including Random Resized Crop (resizing images to 224px with a scale range of [0.4, 1.0]) and Random Horizontal Flip with a probability of 0.5. We train our models on mixture of interleaved, image captions and text only data Table 5. For late fusion models, we found that using smaller learning rate for the vision encoder significantly boost the performance Table 8, and when both the encoder and decoder are initialized (Appendix B.7) we found that freezing the vision encoder works best Table 7." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 317, + 122, + 553, + 178 + ], + "blocks": [ + { + "bbox": [ + 317, + 122, + 553, + 178 + ], + "lines": [ + { + "bbox": [ + 317, + 122, + 553, + 178 + ], + "spans": [ + { + "bbox": [ + 317, + 122, + 553, + 178 + ], + "type": "table", + "html": "
Data typedataset#samplessampling prob.
DFN [21]2B27%
Image-CaptionCOYO [11]600M11.25%
HQITP[57]400M6.75%
InterleavedObelics [34]141M Docs45%
TextDCLM [39]6.6T Toks10%
", + "image_path": "29d9735919725580929983c6cf0f1e57af47d8b28095af285fee7f7e08e14bfc.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 316, + 232, + 553, + 526 + ], + "blocks": [ + { + "bbox": [ + 313, + 180, + 553, + 213 + ], + "lines": [ + { + "bbox": [ + 313, + 180, + 553, + 213 + ], + "spans": [ + { + "bbox": [ + 313, + 180, + 553, + 213 + ], + "type": "text", + "content": "Table 5. Pre-training data mixture. Unless otherwise specified, the training mixture contains " + }, + { + "bbox": [ + 313, + 180, + 553, + 213 + ], + "type": "inline_equation", + "content": "45\\%" + }, + { + "bbox": [ + 313, + 180, + 553, + 213 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 180, + 553, + 213 + ], + "type": "inline_equation", + "content": "45\\%" + }, + { + "bbox": [ + 313, + 180, + 553, + 213 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 180, + 553, + 213 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 313, + 180, + 553, + 213 + ], + "type": "text", + "content": " of image captions, interleaved documents and text-only data." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 316, + 232, + 553, + 526 + ], + "lines": [ + { + "bbox": [ + 316, + 232, + 553, + 526 + ], + "spans": [ + { + "bbox": [ + 316, + 232, + 553, + 526 + ], + "type": "table", + "html": "
Early-fusion
Params275M468M932M1.63B2.28B3.35B
width80010881632220826243232
depth24
Learning rate1.5e-31.5e-35e-44.2e-44e-43.5e-4
Late-fusion
Params289M494M1B1.75B2.43B3.7B
vision encoder width384512768102411841536
vision encoder depth24
width76810241536204824643072
depth24
Learning rate1.5e-31.5e-35e-44.2e-43.8e-43.3e-4
Early-fusion MoEs
Active Params275M468M932M1.63B2.28B3.35B
width80010881632220826243232
depth24
Learning rate1.5e-31.5e-35 e-44.2e-44e-43.5e-4
Training tokens2.5B-600B
OptimizerFully decoupled AdamW [44]
Optimizer Momentumβ1=0.9, β2=0.95
Minimum Learning rate0
Weight decay1e-4
Batch size2k
Patch size(14, 14)
Gradient clipping1.0
MAximum Warmup iterations5k
Augmentations: \nRandomResizedCrop \nsize224px
scale[0.4, 1.0]
RandomHorizontalFlipp=0.5
", + "image_path": "5c5d809252a6557e3554685ef21f3f3bb5c397b4474746975fe77434e16c52b3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 329, + 594, + 542, + 673 + ], + "blocks": [ + { + "bbox": [ + 313, + 536, + 555, + 570 + ], + "lines": [ + { + "bbox": [ + 313, + 536, + 555, + 570 + ], + "spans": [ + { + "bbox": [ + 313, + 536, + 555, + 570 + ], + "type": "text", + "content": "Table 6. Pre-training hyperparameters We detail the hyperparameters used for pre-training different model configurations to derive scaling laws." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 329, + 594, + 542, + 673 + ], + "lines": [ + { + "bbox": [ + 329, + 594, + 542, + 673 + ], + "spans": [ + { + "bbox": [ + 329, + 594, + 542, + 673 + ], + "type": "table", + "html": "
Vision encoder\nlr schedulerInterleaved\n(CE)Image-Caption\n(CE)Text\n(CE)AVG\n(CE)AVG (SFT)\n(Acc)
12.5212.152.8672.51343.49
0.12.5022.0662.8622.47752.27
0.012.5022.0662.8592.47653.76
0.0012.5132.0662.8572.479-
0 (frozen)2.5042.0612.8562.47454.14
", + "image_path": "bd11f503cbb83405e5763b94f0bf05d0647d5566a363956255066979df79ee25.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 674, + 555, + 706 + ], + "lines": [ + { + "bbox": [ + 313, + 674, + 555, + 706 + ], + "spans": [ + { + "bbox": [ + 313, + 674, + 555, + 706 + ], + "type": "text", + "content": "Table 7. Vision encoder scalar. Freezing the vision encoder works best when initializing late-fusion models with pre-trained models." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 71, + 223, + 192 + ], + "blocks": [ + { + "bbox": [ + 58, + 71, + 223, + 192 + ], + "lines": [ + { + "bbox": [ + 58, + 71, + 223, + 192 + ], + "spans": [ + { + "bbox": [ + 58, + 71, + 223, + 192 + ], + "type": "image", + "image_path": "e1045b0eee72b7e00af10c12fc407f2a2b374da404a7ef29ecfb2de71a4c8ab8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 225, + 71, + 378, + 192 + ], + "blocks": [ + { + "bbox": [ + 225, + 71, + 378, + 192 + ], + "lines": [ + { + "bbox": [ + 225, + 71, + 378, + 192 + ], + "spans": [ + { + "bbox": [ + 225, + 71, + 378, + 192 + ], + "type": "image", + "image_path": "48ddff1697fd13cb0e2777d4878ca97b8b1c84373b2fd14b4c89423c97881f5b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 389, + 72, + 544, + 192 + ], + "blocks": [ + { + "bbox": [ + 389, + 72, + 544, + 192 + 
], + "lines": [ + { + "bbox": [ + 389, + 72, + 544, + 192 + ], + "spans": [ + { + "bbox": [ + 389, + 72, + 544, + 192 + ], + "type": "image", + "image_path": "768487d1cb2c067b0f6e62f53328b62a8037bbd733461c754d3ae55f1b154cd2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 146, + 197, + 465, + 214 + ], + "blocks": [ + { + "bbox": [ + 146, + 197, + 465, + 214 + ], + "lines": [ + { + "bbox": [ + 146, + 197, + 465, + 214 + ], + "spans": [ + { + "bbox": [ + 146, + 197, + 465, + 214 + ], + "type": "image", + "image_path": "1249bd8d2556f58d1f41ad80f0ef011fad2d11b1c4b68a40881aad72f6f4d8f5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 222, + 555, + 246 + ], + "lines": [ + { + "bbox": [ + 55, + 222, + 555, + 246 + ], + "spans": [ + { + "bbox": [ + 55, + 222, + 555, + 246 + ], + "type": "text", + "content": "Figure 14. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the model size and the number of training tokens. The gap decreases mainly due to scaling models size." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 58, + 259, + 225, + 379 + ], + "blocks": [ + { + "bbox": [ + 58, + 259, + 225, + 379 + ], + "lines": [ + { + "bbox": [ + 58, + 259, + 225, + 379 + ], + "spans": [ + { + "bbox": [ + 58, + 259, + 225, + 379 + ], + "type": "image", + "image_path": "94e560b7e79f3313be19f00c8abc755758f179ac42e21d3c6cdac2d3d494893e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 401, + 555, + 436 + ], + "lines": [ + { + "bbox": [ + 55, + 401, + 555, + 436 + ], + "spans": [ + { + "bbox": [ + 55, + 401, + 555, + 436 + ], + "type": "text", + "content": "Figure 15. Early vs late fusion: changing the training mixture. 
We vary the training mixtures and plot the final training loss. Early fusion models become better when increasing the proportion of interleaved documents. Early and late fusion has 1.63B and 1.75B parameters respectively." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 225, + 260, + 382, + 378 + ], + "blocks": [ + { + "bbox": [ + 225, + 260, + 382, + 378 + ], + "lines": [ + { + "bbox": [ + 225, + 260, + 382, + 378 + ], + "spans": [ + { + "bbox": [ + 225, + 260, + 382, + 378 + ], + "type": "image", + "image_path": "35b33667f30401245922f03ec33eabdee73856313eeb1047a34b45938190086e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 389, + 275, + 547, + 378 + ], + "blocks": [ + { + "bbox": [ + 389, + 275, + 547, + 378 + ], + "lines": [ + { + "bbox": [ + 389, + 275, + 547, + 378 + ], + "spans": [ + { + "bbox": [ + 389, + 275, + 547, + 378 + ], + "type": "image", + "image_path": "81e7cde88d0f03bd3d1762a769bf80c9cf66016126c5cafb8103a4885b3eefb2.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 69, + 456, + 284, + 514 + ], + "blocks": [ + { + "bbox": [ + 69, + 456, + 284, + 514 + ], + "lines": [ + { + "bbox": [ + 69, + 456, + 284, + 514 + ], + "spans": [ + { + "bbox": [ + 69, + 456, + 284, + 514 + ], + "type": "table", + "html": "
Vision encoder lrScalerInterleaved (CE)Image-Caption (CE)Text (CE)AVG (CE)AVG (SFT) (Acc)
0.12.6742.2193.0722.65534.84
0.012.6722.1973.0712.64738.77
0.0012.6742.2183.0732.65538.46
", + "image_path": "151c20de476ef01f160a8055c4f9468eef0286edcc9f11c199662da996e79625.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 514, + 295, + 548 + ], + "lines": [ + { + "bbox": [ + 55, + 514, + 295, + 548 + ], + "spans": [ + { + "bbox": [ + 55, + 514, + 295, + 548 + ], + "type": "text", + "content": "Table 8. Vision encoder scalar. Reducing the learning rate for the vision encoder is better when training late-fusion models from scratch." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 551, + 174, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 551, + 174, + 565 + ], + "spans": [ + { + "bbox": [ + 55, + 551, + 174, + 565 + ], + "type": "text", + "content": "B. Late vs early fusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 571, + 295, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 571, + 295, + 594 + ], + "spans": [ + { + "bbox": [ + 55, + 571, + 295, + 594 + ], + "type": "text", + "content": "This section provides additional comparison between early and late fusion models." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 600, + 151, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 600, + 151, + 613 + ], + "spans": [ + { + "bbox": [ + 55, + 600, + 151, + 613 + ], + "type": "text", + "content": "B.1. Scaling FLOPs" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 617, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 296, + 715 + ], + "type": "text", + "content": "Figure 14 compares early-fusion and late-fusion models when scaling FLOPs. Specifically, for each model size, we train multiple models using different amounts of training tokens. 
The performance gap between the two approaches mainly decreases due to increasing model sizes rather than increasing the number of training tokens. Despite the decreasing gap, across all the models that we train, early-fusion consistently outperform late-fusion." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 457, + 506, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 457, + 506, + 470 + ], + "spans": [ + { + "bbox": [ + 313, + 457, + 506, + 470 + ], + "type": "text", + "content": "B.2. Changing the training data mixture" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 475, + 555, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 475, + 555, + 631 + ], + "spans": [ + { + "bbox": [ + 313, + 475, + 555, + 631 + ], + "type": "text", + "content": "We analyze how the performance gap between early and late fusion models changes with variations in the training data mixture. As shown in Figure 16 and Figure 15, when fixing the model size, increasing the ratio of text and interleaved data favors early fusion. Interestingly, the gap remains largely unchanged for other data types. We also observe interference effects between different data types. Specifically, increasing the amount of interleaved data negatively impacts performance on image captions and vice versa. Additionally, increasing the proportion of text-only data slightly improves interleaved performance but increases loss on image captions. Overall, we find that text-only and interleaved data are correlated across different setups." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 635, + 555, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 635, + 555, + 659 + ], + "spans": [ + { + "bbox": [ + 313, + 635, + 555, + 659 + ], + "type": "text", + "content": "B.3. 
Scaling image resolution is in favor of early-fusion" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "type": "text", + "content": "We examine how both architectures perform with varying image resolution. We fix the number of model parameters to 1.63B and 1.75B for early and late fusion respectively. All models are trained for 100K steps or 200B tokens. Since" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 89, + 218, + 192 + ], + "blocks": [ + { + "bbox": [ + 59, + 89, + 218, + 192 + ], + "lines": [ + { + "bbox": [ + 59, + 89, + 218, + 192 + ], + "spans": [ + { + "bbox": [ + 59, + 89, + 218, + 192 + ], + "type": "image", + "image_path": "5d510b88dd42b8ddd6e4d6b000a38f3524b9b76ce933ceb6ce23cf3dbff52932.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 215, + 555, + 248 + ], + "lines": [ + { + "bbox": [ + 55, + 215, + 555, + 248 + ], + "spans": [ + { + "bbox": [ + 55, + 215, + 555, + 248 + ], + "type": "text", + "content": "Figure 16. Early vs late fusion: changing the amount of text-only data in the training mixture (isoFLOPs). We vary the ratio of text-only data and plot the final training loss. The gap increases with the text data ratio in favor of early fusion model. Early fusion has 1.63B parameters and late fusion 1.75B parameters." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 224, + 89, + 383, + 192 + ], + "blocks": [ + { + "bbox": [ + 224, + 89, + 383, + 192 + ], + "lines": [ + { + "bbox": [ + 224, + 89, + 383, + 192 + ], + "spans": [ + { + "bbox": [ + 224, + 89, + 383, + 192 + ], + "type": "image", + "image_path": "4940c0e3eb2f8c85622a677039121632355bd85038b5913c73fa6ec971513244.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 391, + 88, + 544, + 192 + ], + "blocks": [ + { + "bbox": [ + 391, + 88, + 544, + 192 + ], + "lines": [ + { + "bbox": [ + 391, + 88, + 544, + 192 + ], + "spans": [ + { + "bbox": [ + 391, + 88, + 544, + 192 + ], + "type": "image", + "image_path": "017cd4e44d6041fbf5d92d6449f51dc545d6432fffd337fa70b94d0dc159c6e0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 58, + 270, + 177, + 386 + ], + "blocks": [ + { + "bbox": [ + 58, + 270, + 177, + 386 + ], + "lines": [ + { + "bbox": [ + 58, + 270, + 177, + 386 + ], + "spans": [ + { + "bbox": [ + 58, + 270, + 177, + 386 + ], + "type": "image", + "image_path": "3158294d4bed882c5795c15d51f30187e0425323247a897fc1f73162cbb2ca5e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 395, + 295, + 482 + ], + "lines": [ + { + "bbox": [ + 55, + 395, + 295, + 482 + ], + "spans": [ + { + "bbox": [ + 55, + 395, + 295, + 482 + ], + "type": "text", + "content": "Figure 17. Early vs late fusion: training with different image resolutions (isoFLOPs). For the same training FLOPs we vary the image resolution (and thus the number of image tokens) during training and report the final training loss. Increasing resolution, hurts the performance on text and interleaved documents, while helping image captioning. 
The gap stays almost the same on text and interleaved data while slightly increase on image captioning in favor of early fusion." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 178, + 270, + 288, + 386 + ], + "blocks": [ + { + "bbox": [ + 178, + 270, + 288, + 386 + ], + "lines": [ + { + "bbox": [ + 178, + 270, + 288, + 386 + ], + "spans": [ + { + "bbox": [ + 178, + 270, + 288, + 386 + ], + "type": "image", + "image_path": "9be0f1f4378cc87d81924a109805bfd368af57358e8a69b7dd0cb84c5d2d293b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 487, + 295, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 487, + 295, + 594 + ], + "spans": [ + { + "bbox": [ + 55, + 487, + 295, + 594 + ], + "type": "text", + "content": "the patch size remains constant, increasing the resolution results in a higher number of visual tokens. For all resolutions, we maintain the same number of text tokens. As shown in Figure 17, the early-fusion model consistently outperforms the late-fusion model across resolutions, particularly for multimodal data, with the performance gap widening at higher resolutions. Additionally, we observe that the loss on text and interleaved data increases as resolution increases." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 600, + 295, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 600, + 295, + 625 + ], + "spans": [ + { + "bbox": [ + 55, + 600, + 295, + 625 + ], + "type": "text", + "content": "B.4. 
Early-fusion is consistently better when matching the late-fusion model size" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 629, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 295, + 713 + ], + "type": "text", + "content": "In this section, we compare the late-fusion model with different configurations of early-fusion one. Specifically, we train early-fusion models that match the late-fusion model in total parameters (Params), text model size (Text), and FLOPs (FLOPs), assuming 45-45-10 training mixture. As shown in Figure 18, early fusion consistently outperforms late fusion when normalized by total parameters, followed" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 271, + 554, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 271, + 554, + 307 + ], + "spans": [ + { + "bbox": [ + 313, + 271, + 554, + 307 + ], + "type": "text", + "content": "by normalization by FLOPs. When matching the text model size, early fusion performs better at higher ratios of interleaved data." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 314, + 499, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 314, + 499, + 327 + ], + "spans": [ + { + "bbox": [ + 313, + 314, + 499, + 327 + ], + "type": "text", + "content": "B.5. Different late-fusion configuration" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 331, + 555, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 331, + 555, + 439 + ], + "spans": [ + { + "bbox": [ + 313, + 331, + 555, + 439 + ], + "type": "text", + "content": "We examine how this scaling changes with different late-fusion configurations. Instead of scaling both the vision and text models equally, as done in the main paper, we fix the vision encoder size to 300M and scale only the text model. 
Figure 19 shows that late-fusion models lag behind at smaller model sizes, with the gap closing significantly as the text model scales. This suggests that allocating more parameters to shared components is more beneficial, further supporting the choice of early-fusion models." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 445, + 454, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 445, + 454, + 458 + ], + "spans": [ + { + "bbox": [ + 313, + 445, + 454, + 458 + ], + "type": "text", + "content": "B.6. Different context lengths" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 462, + 554, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 462, + 554, + 559 + ], + "spans": [ + { + "bbox": [ + 313, + 462, + 554, + 559 + ], + "type": "text", + "content": "In the paper, we use a 1k context length following [31]. Also following, this paper, we ignore the context length effect, as the model dimension dominates the training compute estimate. Moreover, [53] empirically found that scaling coefficients are robust to context length. Nevertheless, Our initial experiments (Figure 20) indicate that scaling the context length did not significantly affect the comparison between late and early fusion." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 564, + 493, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 564, + 493, + 577 + ], + "spans": [ + { + "bbox": [ + 313, + 564, + 493, + 577 + ], + "type": "text", + "content": "B.7. 
Initializing from LLM and CLIP" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "content": "We study the case where both late and early fusion models are initialized from pre-trained models, specifically DCLM-1B [39] and CLIP-ViT-L [55] for late fusion. Interestingly, Figure 21 shows that for text and interleaved multimodal documents, early fusion can match the performance of late fusion when trained for longer. However, closing the gap on image caption data remains more challenging. Notably, when considering the overall training cost, including that of pre-trained models, early fusion requires significantly longer training to compensate for the vision encoder's pretraining cost." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 71, + 217, + 208 + ], + "blocks": [ + { + "bbox": [ + 65, + 71, + 217, + 208 + ], + "lines": [ + { + "bbox": [ + 65, + 71, + 217, + 208 + ], + "spans": [ + { + "bbox": [ + 65, + 71, + 217, + 208 + ], + "type": "image", + "image_path": "bef8ca3878711b6a138e83a9bfee1c56c489545a02ac647bb89e7d1bbb3a12e6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 224, + 70, + 396, + 207 + ], + "blocks": [ + { + "bbox": [ + 224, + 70, + 396, + 207 + ], + "lines": [ + { + "bbox": [ + 224, + 70, + 396, + 207 + ], + "spans": [ + { + "bbox": [ + 224, + 
70, + 396, + 207 + ], + "type": "image", + "image_path": "f92c5996032cc044a4a3f0ed2777722f2dbfee317e184cb6745575c5c7f4f5ec.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 402, + 68, + 539, + 209 + ], + "blocks": [ + { + "bbox": [ + 402, + 68, + 539, + 209 + ], + "lines": [ + { + "bbox": [ + 402, + 68, + 539, + 209 + ], + "spans": [ + { + "bbox": [ + 402, + 68, + 539, + 209 + ], + "type": "image", + "image_path": "8b99abece7cc6b38f661c87c0bc8d05b6d13d113129c2d0fb0fa3e7c2fcb5155.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 197, + 209, + 413, + 227 + ], + "blocks": [ + { + "bbox": [ + 197, + 209, + 413, + 227 + ], + "lines": [ + { + "bbox": [ + 197, + 209, + 413, + 227 + ], + "spans": [ + { + "bbox": [ + 197, + 209, + 413, + 227 + ], + "type": "image", + "image_path": "1f84b67b5d7cfa4cb69e5df7625489543ef35e1179c8f78db4afa37ad99ee036.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 231, + 555, + 265 + ], + "lines": [ + { + "bbox": [ + 55, + 231, + 555, + 265 + ], + "spans": [ + { + "bbox": [ + 55, + 231, + 555, + 265 + ], + "type": "text", + "content": "Figure 18. Early vs late fusion: changing the training mixture and early-fusion configuration. We vary the training mixtures and plot the final training loss for different configuration of early fusion models. For the same number of total parameters early fusion consistently outperform late fusion." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 58, + 277, + 217, + 429 + ], + "blocks": [ + { + "bbox": [ + 58, + 277, + 217, + 429 + ], + "lines": [ + { + "bbox": [ + 58, + 277, + 217, + 429 + ], + "spans": [ + { + "bbox": [ + 58, + 277, + 217, + 429 + ], + "type": "image", + "image_path": "b0edcfb8489751a58c88dfeb30b60fd61492d6bfaf25d9ea5df642aa01489397.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 224, + 279, + 383, + 430 + ], + "blocks": [ + { + "bbox": [ + 224, + 279, + 383, + 430 + ], + "lines": [ + { + "bbox": [ + 224, + 279, + 383, + 430 + ], + "spans": [ + { + "bbox": [ + 224, + 279, + 383, + 430 + ], + "type": "image", + "image_path": "de31258d30cda455adcab7e3e5a0b3e5d9d53d61358d4cde8f2e527f429a9010.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 391, + 279, + 549, + 430 + ], + "blocks": [ + { + "bbox": [ + 391, + 279, + 549, + 430 + ], + "lines": [ + { + "bbox": [ + 391, + 279, + 549, + 430 + ], + "spans": [ + { + "bbox": [ + 391, + 279, + 549, + 430 + ], + "type": "image", + "image_path": "4dde252ff0946067af23011ac2b4db6aa4011b6c34fbfbc6549057a599da712d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 161, + 430, + 450, + 460 + ], + "blocks": [ + { + "bbox": [ + 161, + 430, + 450, + 460 + ], + "lines": [ + { + "bbox": [ + 161, + 430, + 450, + 460 + ], + "spans": [ + { + "bbox": [ + 161, + 430, + 450, + 460 + ], + "type": "image", + "image_path": "9dbd382a1fce940ad7eb236a4d73fcae11b11afb2586a2dd0f48ae744b045239.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 475, + 555, + 510 + ], + "lines": [ + { + "bbox": [ + 55, + 475, + 555, + 510 + ], + "spans": [ 
+ { + "bbox": [ + 55, + 475, + 555, + 510 + ], + "type": "text", + "content": "Figure 19. Early vs late fusion: scaling training FLOPs while fixing the vision encoder size. We compare early and late fusion models when scaling both the amount of training tokens and model sizes. For late fusion mdoels, we fix the vision encoder size (300M) and scale the text model (250M, 834M, 2B, 3B). The gap between early and late get tighter when scaling the text model." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 94, + 529, + 284, + 670 + ], + "blocks": [ + { + "bbox": [ + 94, + 529, + 284, + 670 + ], + "lines": [ + { + "bbox": [ + 94, + 529, + 284, + 670 + ], + "spans": [ + { + "bbox": [ + 94, + 529, + 284, + 670 + ], + "type": "image", + "image_path": "10dbbf3ec8073a52df28e88684bc9882d3a1ce36b6a9552bc7fb858ad7c9543e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 683, + 293, + 695 + ], + "lines": [ + { + "bbox": [ + 58, + 683, + 293, + 695 + ], + "spans": [ + { + "bbox": [ + 58, + 683, + 293, + 695 + ], + "type": "text", + "content": "Figure 20. Early vs late fusion with different context lengths." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 320, + 532, + 556, + 643 + ], + "blocks": [ + { + "bbox": [ + 320, + 532, + 556, + 643 + ], + "lines": [ + { + "bbox": [ + 320, + 532, + 556, + 643 + ], + "spans": [ + { + "bbox": [ + 320, + 532, + 556, + 643 + ], + "type": "image", + "image_path": "af197f1feb50d444ba04fef9c57e806bc731f5d9db518edfb3c47371741f6600.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 647, + 555, + 692 + ], + "lines": [ + { + "bbox": [ + 313, + 647, + 555, + 692 + ], + "spans": [ + { + "bbox": [ + 313, + 647, + 555, + 692 + ], + "type": "text", + "content": "Figure 21. 
Early vs late fusion when initializing the encoder and decoder. Early-fusion can match the performance of late-fusion models when trained for longer. However, the gap is bigger on image-caption data." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 71, + 138, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 71, + 138, + 85 + ], + "spans": [ + { + "bbox": [ + 55, + 71, + 138, + 85 + ], + "type": "text", + "content": "C. Scaling laws" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 90, + 173, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 90, + 173, + 104 + ], + "spans": [ + { + "bbox": [ + 55, + 90, + 173, + 104 + ], + "type": "text", + "content": "C.1. 
Fitting " + }, + { + "bbox": [ + 55, + 90, + 173, + 104 + ], + "type": "inline_equation", + "content": "L = F(N,D)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 108, + 295, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 108, + 295, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 108, + 295, + 133 + ], + "type": "text", + "content": "Following [26], we determine the parameters that minimize the following objective across all our runs " + }, + { + "bbox": [ + 55, + 108, + 295, + 133 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 108, + 295, + 133 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 137, + 295, + 166 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 137, + 295, + 166 + ], + "spans": [ + { + "bbox": [ + 57, + 137, + 295, + 166 + ], + "type": "interline_equation", + "content": "\\min _ {a, b, e, \\alpha , \\beta} \\sum_ {i} \\operatorname {H u b e r} _ {\\delta} \\left(\\operatorname {L S E} \\left(a - \\alpha \\log N _ {i}, b - \\beta \\log D _ {i}, e\\right) - \\log L _ {i}\\right), \\tag {2}", + "image_path": "12a7acbd253e8fc8060bb23066911da65f40d736f2b8fbbf41ef5b64ea350b44.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "spans": [ + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": "We perform this optimization across various initialization ranges and select the parameters that achieve the lowest loss across all initializations. 
Specifically, our grid search spans " + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "inline_equation", + "content": "\\{0, 0.5, 2.5\\}" + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "inline_equation", + "content": "\\{0, 5, 10, \\dots, 30\\}" + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "inline_equation", + "content": "\\{-1, -0.5, 1, 0.5\\}" + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": ". We use the L-BFGS algorithm with " + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "inline_equation", + "content": "\\delta = 1e - 3" + }, + { + "bbox": [ + 55, + 167, + 296, + 238 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 246, + 231, + 259 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 246, + 231, + 259 + ], + "spans": [ + { + "bbox": [ + 55, + 246, + 231, + 259 + ], + "type": "text", + "content": "C.2. Fitting " + }, + { + "bbox": [ + 55, + 246, + 231, + 259 + ], + "type": "inline_equation", + "content": "N \\propto C^{a}, D \\propto C^{b}, D \\propto N^{d}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "spans": [ + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": "While these equations have a closed-form solution [26] for early-fusion models that can be derived from Eq 1, this is not the case for late-fusion models without specifying either the vision encoder or text model size. To ensure a fair comparison, we derive these equations for both models, by performing linear regression in log space. We found that the regression is very close to the coefficient found with closed-form derivation Table 9. 
For instance, to derive " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "N = K_{a}C^{a}" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": ", given a FLOP budget " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " and a set of linearly spaced tokens " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "D_{i}" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " ranging from 10B to 600B, we compute the model size for each " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "D_{i}" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "N_{i} = \\frac{C}{6D}" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " for early fusion and " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "N_{i} = \\frac{C}{6D} + 0.483 * N_{v}" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " for late fusion (for the 45-45-10 mixture, " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "D_{v} = 0.544D" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": ", thus " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "C = 6D(0.544N_{v} + N_{t})" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": "). 
We then apply Eq 1 to obtain the loss for each model size and select " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " that has the minimum loss. We repeat this for all FLOP values corresponding to our runs, resulting in a set of points " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "(C, N_{opt})" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " that we use to regress " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "K_{a}" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": ". We follow a similar procedure to find " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": ". For late-fusion models, we regress a linear model to determine " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "N_{v}" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": " given " + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 263, + 296, + 552 + ], + "type": "text", + "content": ". 
Notably, even though we maintain a fixed width ratio for late-fusion models, this approach is more accurate, as embedding layers prevent a strictly fixed ratio between text and vision model sizes. We present the regression results in Figure 22." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 70, + 563, + 284, + 604 + ], + "blocks": [ + { + "bbox": [ + 70, + 563, + 284, + 604 + ], + "lines": [ + { + "bbox": [ + 70, + 563, + 284, + 604 + ], + "spans": [ + { + "bbox": [ + 70, + 563, + 284, + 604 + ], + "type": "table", + "html": "
Modelabdndn
Closed form0.526490.473510.899381.11188-0.05298
Regression0.523910.475340.900521.10224-0.04933
", + "image_path": "675dbf1f4b9b9be1ca72b30b1a7af76972f55649829f8678333c939553f2b746.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 604, + 295, + 638 + ], + "lines": [ + { + "bbox": [ + 55, + 604, + 295, + 638 + ], + "spans": [ + { + "bbox": [ + 55, + 604, + 295, + 638 + ], + "type": "text", + "content": "Table 9. Scaling laws parameters for early-fusion. Doing regression to derive the scaling laws coefficients leads to very close results to using the closed-form solution." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 659, + 148, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 659, + 148, + 673 + ], + "spans": [ + { + "bbox": [ + 55, + 659, + 148, + 673 + ], + "type": "text", + "content": "C.3. Fitting " + }, + { + "bbox": [ + 55, + 659, + 148, + 673 + ], + "type": "inline_equation", + "content": "L \\propto C^c" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": "To determine the relationship between the final model loss and the compute budget " + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": ", we begin by interpolating the points corresponding to the same model size and compute" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "text", + "content": "the convex hull that covers the minimum loss achieved by all runs for each FLOP. 
This results in a continuous mapping from the FLOPs to the lowest loss. We consider a range of FLOPs, excluding very small values " + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "inline_equation", + "content": "(\\leq 3e^{19})" + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "text", + "content": ", and construct a dataset of " + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "inline_equation", + "content": "(C,L)" + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "text", + "content": " for linearly spaced compute " + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "text", + "content": ". Using this data, we find the linear relationship between " + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "text", + "content": " in the log space and deduce the exponent " + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 313, + 72, + 555, + 168 + ], + "type": "text", + "content": ". We visualize the results in Figure 26." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 317, + 182, + 432, + 260 + ], + "blocks": [ + { + "bbox": [ + 317, + 182, + 432, + 260 + ], + "lines": [ + { + "bbox": [ + 317, + 182, + 432, + 260 + ], + "spans": [ + { + "bbox": [ + 317, + 182, + 432, + 260 + ], + "type": "image", + "image_path": "163a2ab479abcc17e9353749f796560b4c9b3868903e73a7bd4261351ac385ac.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 382, + 262, + 389, + 270 + ], + "lines": [ + { + "bbox": [ + 382, + 262, + 389, + 270 + ], + "spans": [ + { + "bbox": [ + 382, + 262, + 389, + 270 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 437, + 181, + 545, + 260 + ], + "blocks": [ + { + "bbox": [ + 437, + 181, + 545, + 260 + ], + "lines": [ + { + "bbox": [ + 437, + 181, + 545, + 260 + ], + "spans": [ + { + "bbox": [ + 437, + 181, + 545, + 260 + ], + "type": "image", + "image_path": "946d07662bb75c3af1e430bcb2e71945e0fc9b93f3c8ea8c090478e36a9ee523.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 495, + 262, + 503, + 270 + ], + "lines": [ + { + "bbox": [ + 495, + 262, + 503, + 270 + ], + "spans": [ + { + "bbox": [ + 495, + 262, + 503, + 270 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 317, + 275, + 438, + 354 + ], + "blocks": [ + { + "bbox": [ + 317, + 275, + 438, + 354 + ], + "lines": [ + { + "bbox": [ + 317, + 275, + 438, + 354 + ], + "spans": [ + { + "bbox": [ + 317, + 275, + 438, + 354 + ], + "type": "image", + "image_path": "8357b1f7cf079f4433ace33ae357a8cce5c5bbe3741aa00985d510c614ec3825.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 
438, + 275, + 544, + 354 + ], + "blocks": [ + { + "bbox": [ + 438, + 275, + 544, + 354 + ], + "lines": [ + { + "bbox": [ + 438, + 275, + 544, + 354 + ], + "spans": [ + { + "bbox": [ + 438, + 275, + 544, + 354 + ], + "type": "image", + "image_path": "45331be5dffd8bf1906423e01879dee5d3a816ec501329e152690ebd1e24e59f.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 317, + 365, + 456, + 443 + ], + "blocks": [ + { + "bbox": [ + 388, + 357, + 395, + 364 + ], + "lines": [ + { + "bbox": [ + 388, + 357, + 395, + 364 + ], + "spans": [ + { + "bbox": [ + 388, + 357, + 395, + 364 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 317, + 365, + 456, + 443 + ], + "lines": [ + { + "bbox": [ + 317, + 365, + 456, + 443 + ], + "spans": [ + { + "bbox": [ + 317, + 365, + 456, + 443 + ], + "type": "image", + "image_path": "130ab8d89bfbb0daec936aeeb6ae15603aaee2a3b5a736c530f7a7e8d6b259fe.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 395, + 445, + 403, + 453 + ], + "lines": [ + { + "bbox": [ + 395, + 445, + 403, + 453 + ], + "spans": [ + { + "bbox": [ + 395, + 445, + 403, + 453 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 457, + 365, + 548, + 443 + ], + "blocks": [ + { + "bbox": [ + 493, + 357, + 501, + 364 + ], + "lines": [ + { + "bbox": [ + 493, + 357, + 501, + 364 + ], + "spans": [ + { + "bbox": [ + 493, + 357, + 501, + 364 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 457, + 365, + 548, + 443 + ], + "lines": [ + { + "bbox": [ + 457, + 365, + 548, + 443 + ], + "spans": [ + { + "bbox": [ + 457, + 365, + 548, + 443 + ], + "type": "image", + 
"image_path": "8eeca22c13a880c372f86b1bd6d50e828536005a351d735514d7e9130695399e.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 500, + 445, + 507, + 453 + ], + "lines": [ + { + "bbox": [ + 500, + 445, + 507, + 453 + ], + "spans": [ + { + "bbox": [ + 500, + 445, + 507, + 453 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 313, + 469, + 555, + 502 + ], + "lines": [ + { + "bbox": [ + 313, + 469, + 555, + 502 + ], + "spans": [ + { + "bbox": [ + 313, + 469, + 555, + 502 + ], + "type": "text", + "content": "Figure 22. Regression results of the scaling laws coefficients. our estimation of the scaling coefficients is close to the closed form solution." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 528, + 534, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 528, + 534, + 541 + ], + "spans": [ + { + "bbox": [ + 313, + 528, + 534, + 541 + ], + "type": "text", + "content": "C.4. Scaling laws for different target data type" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "text", + "content": "In Figure 27, we derive the scaling laws for different target data types. In general, we observe that the model learns image captioning faster than interleaved data, as indicated by the higher absolute value of the scaling exponent (e.g., 0.062 vs 0.046), despite using the same data ratio for captioning and interleaved data (45% each). Additionally, we find that the model learns more slowly on text-only data, likely due to the smaller amount of text-only data (10%). 
Across model configurations, we find that early fusion scales similarly to late fusion on image captioning but has a lower multiplicative constant (49.99 vs 47.97). For MoEs, the model learns faster but exhibits a higher multiplicative constant. On text and interleaved data, early and late fusion models scale similarly and achieve comparable" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 83, + 68, + 312, + 257 + ], + "blocks": [ + { + "bbox": [ + 83, + 68, + 312, + 257 + ], + "lines": [ + { + "bbox": [ + 83, + 68, + 312, + 257 + ], + "spans": [ + { + "bbox": [ + 83, + 68, + 312, + 257 + ], + "type": "image", + "image_path": "05809573792c53ca70b02f70c994411ae668a3b5b2344a38b3d284b30400e43d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 271, + 553, + 283 + ], + "lines": [ + { + "bbox": [ + 55, + 271, + 553, + 283 + ], + "spans": [ + { + "bbox": [ + 55, + 271, + 553, + 283 + ], + "type": "text", + "content": "Figure 23. Observed vs predicted loss. We visualize the loss predicted by our scaling laws (Eq 1) and the actual loss achieved by each run." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 315, + 68, + 545, + 258 + ], + "blocks": [ + { + "bbox": [ + 315, + 68, + 545, + 258 + ], + "lines": [ + { + "bbox": [ + 315, + 68, + 545, + 258 + ], + "spans": [ + { + "bbox": [ + 315, + 68, + 545, + 258 + ], + "type": "image", + "image_path": "eae4c7c1f75345ab401e7eb770895d809db3271eff8cce1157ab801bf3947c98.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 306, + 295, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 306, + 295, + 331 + ], + "spans": [ + { + "bbox": [ + 55, + 306, + 295, + 331 + ], + "type": "text", + "content": "performance. However, MoEs demonstrate better overall performance while learning slightly more slowly." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 340, + 283, + 353 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 340, + 283, + 353 + ], + "spans": [ + { + "bbox": [ + 55, + 340, + 283, + 353 + ], + "type": "text", + "content": "C.5. Scaling laws for different training mixtures" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 358, + 295, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 358, + 295, + 515 + ], + "spans": [ + { + "bbox": [ + 54, + 358, + 295, + 515 + ], + "type": "text", + "content": "We investigate how the scaling laws change when modifying the training mixtures. Specifically, we vary the ratio of image caption, interleaved, and text-only data and report the results in Figure 28. Overall, we observe similar scaling trends, with only minor changes in the scaling coefficients. Upon closer analysis, we find that increasing the ratio of a particular data type in the training mixture, leads to a corresponding increase in its scaling exponent. 
For instance, increasing the ratio of image captions from " + }, + { + "bbox": [ + 54, + 358, + 295, + 515 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 54, + 358, + 295, + 515 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 54, + 358, + 295, + 515 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 54, + 358, + 295, + 515 + ], + "type": "text", + "content": " raises the absolute value of the exponent from 0.056 to 0.061. However, for text-only data, we do not observe significant changes in the scaling coefficients when varying its proportion in the training mixture." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 94, + 528, + 260, + 569 + ], + "blocks": [ + { + "bbox": [ + 94, + 528, + 260, + 569 + ], + "lines": [ + { + "bbox": [ + 94, + 528, + 260, + 569 + ], + "spans": [ + { + "bbox": [ + 94, + 528, + 260, + 569 + ], + "type": "table", + "html": "
ParameterMSER2MAE (%)
Held-in0.00290.98070.8608
Held-out0.00040.96820.5530
", + "image_path": "62cf5e7b98b47d2792dc6b7fe326f249af87efb09ffa6f45b0a87b47d5481909.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 70, + 634, + 284, + 672 + ], + "blocks": [ + { + "bbox": [ + 55, + 571, + 295, + 605 + ], + "lines": [ + { + "bbox": [ + 55, + 571, + 295, + 605 + ], + "spans": [ + { + "bbox": [ + 55, + 571, + 295, + 605 + ], + "type": "text", + "content": "Table 10. Scaling laws prediction errors. We report the mean square error, R2 and mean absolute error for the loss prediction for held-in and held-out (8B model) data." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 634, + 284, + 672 + ], + "lines": [ + { + "bbox": [ + 70, + 634, + 284, + 672 + ], + "spans": [ + { + "bbox": [ + 70, + 634, + 284, + 672 + ], + "type": "table", + "html": "
ModelEαβabd
Avg1.809220.298420.332090.543020.483010.92375
Std0.338110.101010.028920.088130.057870.23296
", + "image_path": "c108eee9a2065d7228436e8d9b0fa0023a328984cbb26e12f2be985be92453a1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 673, + 295, + 696 + ], + "lines": [ + { + "bbox": [ + 55, + 673, + 295, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 673, + 295, + 696 + ], + "type": "text", + "content": "Table 11. Scaling laws sensitivity. We report the mean and standard deviation after bootstrapping with 100 iterations." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 314, + 305, + 448, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 305, + 448, + 317 + ], + "spans": [ + { + "bbox": [ + 314, + 305, + 448, + 317 + ], + "type": "text", + "content": "C.6. Scaling laws evaluation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 323, + 555, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 323, + 555, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 323, + 555, + 443 + ], + "type": "text", + "content": "For each model size and number of training tokens, we compute the loss using the estimated functional form in Eq 1 and compare it to the actual loss observed in our runs. Figure 23, Figure 24, and Table 10 visualizes these comparisons, showing that our estimation is highly accurate, particularly for lower loss values and larger FLOPs. We also assess our scaling laws in an extrapolation setting, predicting performance beyond the model sizes used for fitting. Notably, our approach estimates the performance of an 8B model with reasonable accuracy." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "spans": [ + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "text", + "content": "Additionally, we conduct a sensitivity analysis using bootstrapping. 
Specifically, we sample " + }, + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "text", + "content": " points with replacement (" + }, + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "text", + "content": " being the total number of trained models) and re-estimate the scaling law coefficients. This process is repeated 100 times, and we report the mean and standard deviation of each coefficient. Table 11 shows that our estimation is more precise for " + }, + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "text", + "content": " than for " + }, + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 445, + 556, + 554 + ], + "type": "text", + "content": ", primarily due to the smaller number of model sizes relative to the number of different token counts used to derive the scaling laws." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 562, + 484, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 562, + 484, + 575 + ], + "spans": [ + { + "bbox": [ + 313, + 562, + 484, + 575 + ], + "type": "text", + "content": "C.7. Scaling laws for sparse NMMs." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 580, + 555, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 580, + 555, + 689 + ], + "spans": [ + { + "bbox": [ + 313, + 580, + 555, + 689 + ], + "type": "text", + "content": "Similar to dense models, we fit a parametric loss function (Eq 1) to predict the loss of sparse NMMs based on the number of parameters and training tokens, replacing the total parameter count with the number of active parameters. While incorporating sparsity is standard when deriving scaling laws for MoEs [2, 33, 74], we focus on deriving scaling laws specific to the sparsity level used in our MoE setup. This yields coefficients that are implicitly conditioned on the sparsity configuration." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 689, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 556, + 715 + ], + "type": "text", + "content": "We also experiment with a sparsity-aware formulation of the scaling law as proposed in [2], and observe consistent" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 68, + 289, + 251 + ], + "blocks": [ + { + "bbox": [ + 66, + 68, + 289, + 251 + ], + "lines": [ + { + "bbox": [ + 66, + 68, + 289, + 251 + ], + "spans": [ + { + "bbox": [ + 66, + 68, + 289, + 251 + ], + "type": "image", + "image_path": "a40ce810c6407918e90c23eeda423059a15b50810c13f19580f13fa38ea33ab6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" 
+ }, + { + "bbox": [ + 56, + 255, + 295, + 300 + ], + "lines": [ + { + "bbox": [ + 56, + 255, + 295, + 300 + ], + "spans": [ + { + "bbox": [ + 56, + 255, + 295, + 300 + ], + "type": "text", + "content": "Figure 24. Observed vs predicted loss. We visualize the loss predicted by our scaling laws Eq 1 and the actual loss achieved by each run. We can reliably predict the performance of models larger (8B params) than those used to fit the scaling laws." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 303, + 296, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 303, + 296, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 303, + 296, + 376 + ], + "type": "text", + "content": "trends (Table 12). In particular, the exponents associated with model size " + }, + { + "bbox": [ + 55, + 303, + 296, + 376 + ], + "type": "inline_equation", + "content": "(N)" + }, + { + "bbox": [ + 55, + 303, + 296, + 376 + ], + "type": "text", + "content": " are substantially larger than those for training tokens " + }, + { + "bbox": [ + 55, + 303, + 296, + 376 + ], + "type": "inline_equation", + "content": "(\\beta)" + }, + { + "bbox": [ + 55, + 303, + 296, + 376 + ], + "type": "text", + "content": ", reinforcing the importance of scaling model size in sparse architectures. Additionally, we observe that the terms governing the scaling of active parameters decompose into two components." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 389, + 212, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 389, + 212, + 403 + ], + "spans": [ + { + "bbox": [ + 55, + 389, + 212, + 403 + ], + "type": "text", + "content": "D. 
Discussion and Limitations" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 411, + 296, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 411, + 296, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 411, + 296, + 495 + ], + "type": "text", + "content": "Scaling laws for multimodal data mixtures. Our scaling laws study spans different model configurations and training mixtures. While results suggest that the scaling law coefficients remain largely consistent across mixtures, a broader exploration of mixture variations is needed to validate this observation and establish a unified scaling law that accounts for this factor." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 496, + 295, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 496, + 295, + 567 + ], + "spans": [ + { + "bbox": [ + 55, + 496, + 295, + 567 + ], + "type": "text", + "content": "Scaling laws and performance on downstream tasks. Similar to previous scaling law studies, our analysis focuses on pretraining performance as measured by the validation loss. However, the extent to which these findings translate to downstream performance remains an open question and requires further investigation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 568, + 295, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 568, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 55, + 568, + 295, + 640 + ], + "type": "text", + "content": "Extrapolation to larger scales. The accuracy of scaling law predictions improves with increasing FLOPs Appendix C. Furthermore, we validate our laws when extrapolating to larger model sizes (Appendix C.6). However, whether these laws can be reliably extrapolated to extremely large model sizes remains an open question." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "content": "High resolution and early-fusion models. Training early-fusion models with high-resolution inputs leads to a significant increase in vision tokens. While pooling techniques have been widely adopted for late-fusion models, alternative approaches may be necessary for early fusion. Given the similarity of early-fusion models to LLMs, it appears" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 95 + ], + "type": "text", + "content": "that techniques for extending context length could be beneficial." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 95, + 555, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 95, + 555, + 192 + ], + "spans": [ + { + "bbox": [ + 313, + 95, + 555, + 192 + ], + "type": "text", + "content": "Scaling laws for multimodal MoEs models. For MoEs, we consider only a single configuration (top-1 routing with 8 experts). We found this configuration to work reasonably well in our setup, and follow a standard MoEs implementation. However, the findings may vary when optimizing more the MoE architecture or exploring different load-balancing, routing strategies or different experts implementations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 202, + 553, + 230 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 202, + 553, + 230 + ], + "spans": [ + { + "bbox": [ + 313, + 202, + 553, + 230 + ], + "type": "text", + "content": "E. 
Mixture of experts and modality-specific specialization" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 236, + 432, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 236, + 432, + 248 + ], + "spans": [ + { + "bbox": [ + 313, + 236, + 432, + 248 + ], + "type": "text", + "content": "E.1. MoEs configuration" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 253, + 555, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 253, + 555, + 291 + ], + "spans": [ + { + "bbox": [ + 313, + 253, + 555, + 291 + ], + "type": "text", + "content": "We experiment with different MoEs configuration by changing the number of experts and the top-k. We report a sample of these experiments in Table 13." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 297, + 432, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 297, + 432, + 309 + ], + "spans": [ + { + "bbox": [ + 314, + 297, + 432, + 309 + ], + "type": "text", + "content": "E.2. MoEs specialization" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 353, + 326, + 517, + 446 + ], + "blocks": [ + { + "bbox": [ + 353, + 326, + 517, + 446 + ], + "lines": [ + { + "bbox": [ + 353, + 326, + 517, + 446 + ], + "spans": [ + { + "bbox": [ + 353, + 326, + 517, + 446 + ], + "type": "image", + "image_path": "0457adae4a061cd434da8b673b2881180bd66aaec1951b226848f1504508a81e.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 449, + 555, + 483 + ], + "lines": [ + { + "bbox": [ + 313, + 449, + 555, + 483 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 555, + 483 + ], + "type": "text", + "content": "Figure 25. Modality-specific specialization. We visualize the experts specialization to text and image modalities. Models are evaluated on Obelics." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 498, + 556, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 556, + 666 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 556, + 666 + ], + "type": "text", + "content": "We investigate multimodal specialization in MoE architectures. We compute a specialization score as the average difference between the number of text/images tokens assigned to each expert and a uniform assignment " + }, + { + "bbox": [ + 313, + 498, + 556, + 666 + ], + "type": "inline_equation", + "content": "(1 / E)" + }, + { + "bbox": [ + 313, + 498, + 556, + 666 + ], + "type": "text", + "content": ". Additionally, we visualize the normalized number of text and image tokens assigned to each expert across layers. Figure 25 shows clear modality-specific experts, particularly in the early layers. Furthermore, the specialization score decreases as the number of layers increases but rises again in the very last layers. This suggests that early and final layers require more modality specialization compared to mid-layers. Additionally, we observe several experts shared between text and image modalities, a phenomenon not present in hard-routed or predefined modality-specific experts." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 83, + 103, + 529, + 165 + ], + "blocks": [ + { + "bbox": [ + 83, + 103, + 529, + 165 + ], + "lines": [ + { + "bbox": [ + 83, + 103, + 529, + 165 + ], + "spans": [ + { + "bbox": [ + 83, + 103, + 529, + 165 + ], + "type": "table", + "html": "
L(N,D) = E + A/Nα + B/DβvsL(N,D,S) = A/Nα + B/Dβ + C(1-S)λ + d(1-S)δNγ
ModelEABαβλδγCd
L(N,D) (Eq 1)2.15838177346590.7100.372-----
L(N,D,S) [2]1.0788146600.58900.37200.20.20.709561.0788381475
", + "image_path": "045203ceb8b4e4655a60480c74ed6b69e687bea09891aca71efb66fa919250c1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 83, + 256, + 529, + 319 + ], + "blocks": [ + { + "bbox": [ + 189, + 166, + 420, + 177 + ], + "lines": [ + { + "bbox": [ + 189, + 166, + 420, + 177 + ], + "spans": [ + { + "bbox": [ + 189, + 166, + 420, + 177 + ], + "type": "text", + "content": "Table 12. Scaling laws for sparse native multimodal models." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 83, + 256, + 529, + 319 + ], + "lines": [ + { + "bbox": [ + 83, + 256, + 529, + 319 + ], + "spans": [ + { + "bbox": [ + 83, + 256, + 529, + 319 + ], + "type": "table", + "html": "
AccuracyCIDEr
AVGVQAv2TextVQAOKVQAGQAVizWizCOCOTextCaps
4-E-top-140.055264.06814.28441.94861.4618.51662.20134.08
8-E-top-141.693465.68417.5542.90863.2619.06567.87739.63
8-E-top-242.854666.46619.16245.34463.9419.36165.98841.649
8-E-top-2 finegrained39.90462.7615.5841.8861.617.757.5235.42
", + "image_path": "954ecdf74a27126739fe55ea72c130d722bf017ede0c1fb5950a4e172b17fdcd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 190, + 321, + 418, + 332 + ], + "lines": [ + { + "bbox": [ + 190, + 321, + 418, + 332 + ], + "spans": [ + { + "bbox": [ + 190, + 321, + 418, + 332 + ], + "type": "text", + "content": "Table 13. SFT results with different MoEs configurations." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 58, + 413, + 555, + 630 + ], + "blocks": [ + { + "bbox": [ + 58, + 413, + 555, + 630 + ], + "lines": [ + { + "bbox": [ + 58, + 413, + 555, + 630 + ], + "spans": [ + { + "bbox": [ + 58, + 413, + 555, + 630 + ], + "type": "image", + "image_path": "925871664600fa369934dd20a2f34c9ad334d39d98206e080eb90db5737c852f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 640, + 555, + 673 + ], + "lines": [ + { + "bbox": [ + 55, + 640, + 555, + 673 + ], + "spans": [ + { + "bbox": [ + 55, + 640, + 555, + 673 + ], + "type": "text", + "content": "Figure 26. Scaling laws for native multimodal models. From left to right: late-fusion (dense), early-fusion (dense) and early-fusion MoEs. The scaling exponents are very close for all models. However, MoEs leads to overall lower loss (smaller multiplicative constant) and takes longer to saturate." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 112, + 217, + 264 + ], + "blocks": [ + { + "bbox": [ + 56, + 112, + 217, + 264 + ], + "lines": [ + { + "bbox": [ + 56, + 112, + 217, + 264 + ], + "spans": [ + { + "bbox": [ + 56, + 112, + 217, + 264 + ], + "type": "image", + "image_path": "a5eae89bad3ce176fd8a9fbfe3fb8e612accba59e9227da594fe7f8363a3bd57.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 224, + 112, + 380, + 264 + ], + "blocks": [ + { + "bbox": [ + 224, + 112, + 380, + 264 + ], + "lines": [ + { + "bbox": [ + 224, + 112, + 380, + 264 + ], + "spans": [ + { + "bbox": [ + 224, + 112, + 380, + 264 + ], + "type": "image", + "image_path": "92ad024f678fc1ec2ce39f984eafbe6ca1eaeb8ab89672d5e361993c173ed68b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 389, + 132, + 545, + 263 + ], + "blocks": [ + { + "bbox": [ + 389, + 132, + 545, + 263 + ], + "lines": [ + { + "bbox": [ + 389, + 132, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 389, + 132, + 545, + 263 + ], + "type": "image", + "image_path": "7445330c28293501507c7a1de3b802c291b7e7d4af6025b3396fca03b97b7816.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 57, + 284, + 214, + 416 + ], + "blocks": [ + { + "bbox": [ + 57, + 284, + 214, + 416 + ], + "lines": [ + { + "bbox": [ + 
57, + 284, + 214, + 416 + ], + "spans": [ + { + "bbox": [ + 57, + 284, + 214, + 416 + ], + "type": "image", + "image_path": "fb070c10fca1f4655ac4f3b29724b2851fad05ebde2ebfed26e2f7526c0576ed.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 224, + 284, + 380, + 415 + ], + "blocks": [ + { + "bbox": [ + 224, + 284, + 380, + 415 + ], + "lines": [ + { + "bbox": [ + 224, + 284, + 380, + 415 + ], + "spans": [ + { + "bbox": [ + 224, + 284, + 380, + 415 + ], + "type": "image", + "image_path": "2298a5bd5a365cb44cab1b103fb156142e541e4de0c91847da088ebeb6d5772b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 389, + 285, + 545, + 415 + ], + "blocks": [ + { + "bbox": [ + 389, + 285, + 545, + 415 + ], + "lines": [ + { + "bbox": [ + 389, + 285, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 389, + 285, + 545, + 415 + ], + "type": "image", + "image_path": "170aa90031c86716a60fdb8bca8d1f9c6abc83012c624d6aa092ecd2d1457844.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 58, + 437, + 223, + 583 + ], + "blocks": [ + { + "bbox": [ + 58, + 437, + 223, + 583 + ], + "lines": [ + { + "bbox": [ + 58, + 437, + 223, + 583 + ], + "spans": [ + { + "bbox": [ + 58, + 437, + 223, + 583 + ], + "type": "image", + "image_path": "7229c0205223fd2165a40f38f790f64f89c1c7601693a992b739a3d796f80bf7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 647, + 555, + 669 + ], + "lines": [ + { + "bbox": [ + 55, + 647, + 555, + 669 + ], + "spans": [ + { + "bbox": [ + 55, + 647, + 555, + 669 + ], + "type": "text", + "content": "Figure 27. Scaling laws for native multimodal models. From top to bottom: late-fusion (dense), early-fusion (dense) and early-fusion MoEs. 
From left to right: cross-entropy on the validation set of image-caption, interleaved and text-only data." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 225, + 437, + 388, + 583 + ], + "blocks": [ + { + "bbox": [ + 225, + 437, + 388, + 583 + ], + "lines": [ + { + "bbox": [ + 225, + 437, + 388, + 583 + ], + "spans": [ + { + "bbox": [ + 225, + 437, + 388, + 583 + ], + "type": "image", + "image_path": "87a7df1c2a6cde40a771564ee46e3757cc1a708b33a759bba95feacc6350135a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 391, + 437, + 552, + 583 + ], + "blocks": [ + { + "bbox": [ + 391, + 437, + 552, + 583 + ], + "lines": [ + { + "bbox": [ + 391, + 437, + 552, + 583 + ], + "spans": [ + { + "bbox": [ + 391, + 437, + 552, + 583 + ], + "type": "image", + "image_path": "b7303f5247e7b6e588dfa05e005f69657763ba42c509db0cf3096a7e8f1b8e8d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 151, + 595, + 460, + 635 + ], + "blocks": [ + { + "bbox": [ + 151, + 595, + 460, + 635 + ], + "lines": [ + { + "bbox": [ + 151, + 595, + 460, + 635 + ], + "spans": [ + { + "bbox": [ + 151, + 595, + 460, + 635 + ], + "type": "table", + "html": "
0.289B0.494B1B1.748B2.430B3.714B
0.275B0.464B0.932B1.627B2.280B3.354B
0.275B0.464B0.932B1.627B2.280B3.354B
", + "image_path": "19bb36aad451a1cd656099f17e52e909bfb31aefe24539cfdfbdde715fde3f60.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 92, + 214, + 223 + ], + "blocks": [ + { + "bbox": [ + 56, + 92, + 214, + 223 + ], + "lines": [ + { + "bbox": [ + 56, + 92, + 214, + 223 + ], + "spans": [ + { + "bbox": [ + 56, + 92, + 214, + 223 + ], + "type": "image", + "image_path": "091e908750bec32dc612b24b77318d4392a00579c8de5564bd1f4aaeac00cffe.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 223, + 92, + 380, + 223 + ], + "blocks": [ + { + "bbox": [ + 294, + 76, + 328, + 85 + ], + "lines": [ + { + "bbox": [ + 294, + 76, + 328, + 85 + ], + "spans": [ + { + "bbox": [ + 294, + 76, + 328, + 85 + ], + "type": "text", + "content": "45-45-10" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 223, + 92, + 380, + 223 + ], + "lines": [ + { + "bbox": [ + 223, + 92, + 380, + 223 + ], + "spans": [ + { + "bbox": [ + 223, + 92, + 380, + 223 + ], + "type": "image", + "image_path": "f1d1d1a9ae09d693813d534ed3e52995b2b53c52b57e1dea64039a244f0d113b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 296, + 228, + 326, + 238 + ], + "lines": [ + { + "bbox": [ + 296, + 228, + 326, + 238 + ], + "spans": [ + { + "bbox": [ + 296, + 228, + 326, + 238 + ], + "type": "text", + "content": "40-20-40" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": 
"image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 391, + 93, + 544, + 222 + ], + "blocks": [ + { + "bbox": [ + 391, + 93, + 544, + 222 + ], + "lines": [ + { + "bbox": [ + 391, + 93, + 544, + 222 + ], + "spans": [ + { + "bbox": [ + 391, + 93, + 544, + 222 + ], + "type": "image", + "image_path": "58b8a90492785ea6eb48f59e69bdee2ab2ead0444b721b6d7b2d5a5ac625d566.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 57, + 244, + 213, + 376 + ], + "blocks": [ + { + "bbox": [ + 57, + 244, + 213, + 376 + ], + "lines": [ + { + "bbox": [ + 57, + 244, + 213, + 376 + ], + "spans": [ + { + "bbox": [ + 57, + 244, + 213, + 376 + ], + "type": "image", + "image_path": "cb4ae73517140700ebf19aea43a4b90376fd6e9529aca7bd298171d3300e1774.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 224, + 245, + 379, + 375 + ], + "blocks": [ + { + "bbox": [ + 224, + 245, + 379, + 375 + ], + "lines": [ + { + "bbox": [ + 224, + 245, + 379, + 375 + ], + "spans": [ + { + "bbox": [ + 224, + 245, + 379, + 375 + ], + "type": "image", + "image_path": "ea60f82ab55e1707f5708b66a23159e1bf846ea0bd5d17664d67a614b59b36da.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 296, + 381, + 326, + 390 + ], + "lines": [ + { + "bbox": [ + 296, + 381, + 326, + 390 + ], + "spans": [ + { + "bbox": [ + 296, + 381, + 326, + 390 + ], + "type": "text", + "content": "30-30-40" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 391, + 245, + 544, + 375 + ], + "blocks": [ + { + "bbox": [ + 391, + 245, + 544, + 375 + ], + "lines": [ + { + "bbox": [ + 391, + 245, + 544, + 375 + ], + "spans": [ + { + "bbox": [ + 391, + 245, + 544, + 375 + ], + "type": "image", + "image_path": 
"fdb45bc5fa88e9e4889729e2053cff1cee23f3c95a045183987d941d85b99456.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 58, + 397, + 213, + 529 + ], + "blocks": [ + { + "bbox": [ + 58, + 397, + 213, + 529 + ], + "lines": [ + { + "bbox": [ + 58, + 397, + 213, + 529 + ], + "spans": [ + { + "bbox": [ + 58, + 397, + 213, + 529 + ], + "type": "image", + "image_path": "d1f9ebe2963b7671652b537ba3995aa73ec10a81a9fce26f78cb63d714e35caf.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 224, + 396, + 379, + 527 + ], + "blocks": [ + { + "bbox": [ + 224, + 396, + 379, + 527 + ], + "lines": [ + { + "bbox": [ + 224, + 396, + 379, + 527 + ], + "spans": [ + { + "bbox": [ + 224, + 396, + 379, + 527 + ], + "type": "image", + "image_path": "3ae00f36a4a66bc014814831c85b16c3b2c789a86ea8281e5e74facae8c1ca14.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 296, + 533, + 326, + 543 + ], + "lines": [ + { + "bbox": [ + 296, + 533, + 326, + 543 + ], + "spans": [ + { + "bbox": [ + 296, + 533, + 326, + 543 + ], + "type": "text", + "content": "20-40-40" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 391, + 397, + 544, + 527 + ], + "blocks": [ + { + "bbox": [ + 391, + 397, + 544, + 527 + ], + "lines": [ + { + "bbox": [ + 391, + 397, + 544, + 527 + ], + "spans": [ + { + "bbox": [ + 391, + 397, + 544, + 527 + ], + "type": "image", + "image_path": "af0628a1ce2945b362e0e67e09294f4be2d6a8597dc24e17cd4ce6c346c9a960.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 58, + 550, + 213, + 679 + ], + "blocks": [ + { + "bbox": [ + 58, + 550, + 213, + 679 + ], + "lines": [ + { + "bbox": [ + 58, + 550, + 213, + 679 + ], + 
"spans": [ + { + "bbox": [ + 58, + 550, + 213, + 679 + ], + "type": "image", + "image_path": "80f27c4c3ad4d2f64ee10c2d8bb2923fb6dd8bdd2496f979ee4083e210d57191.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 224, + 550, + 379, + 679 + ], + "blocks": [ + { + "bbox": [ + 224, + 550, + 379, + 679 + ], + "lines": [ + { + "bbox": [ + 224, + 550, + 379, + 679 + ], + "spans": [ + { + "bbox": [ + 224, + 550, + 379, + 679 + ], + "type": "image", + "image_path": "97f7d3f77f2b4f693354d3f8412b5275b071e1b828e08f12a883241d15b4b73f.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 391, + 550, + 544, + 679 + ], + "blocks": [ + { + "bbox": [ + 391, + 550, + 544, + 679 + ], + "lines": [ + { + "bbox": [ + 391, + 550, + 544, + 679 + ], + "spans": [ + { + "bbox": [ + 391, + 550, + 544, + 679 + ], + "type": "image", + "image_path": "11479b86f8758143bec163b9b3f731a9a13ce7f8d8af1f77885578d6fc23d4d9.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 154, + 685, + 462, + 702 + ], + "blocks": [ + { + "bbox": [ + 154, + 685, + 462, + 702 + ], + "lines": [ + { + "bbox": [ + 154, + 685, + 462, + 702 + ], + "spans": [ + { + "bbox": [ + 154, + 685, + 462, + 702 + ], + "type": "image", + "image_path": "20f8b6dd661ab17387d6361040a2dda735af8ad5b613ab0bc7dd00e0c1dfca59.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 708, + 555, + 731 + ], + "lines": [ + { + "bbox": [ + 55, + 708, + 555, + 731 + ], + "spans": [ + { + "bbox": [ + 55, + 708, + 555, + 731 + ], + "type": "text", + "content": "Figure 28. Scaling laws for early-fusion native multimodal models. Our runs across different training mixtures (Image-caption-Interleaved-Text) and FLOPs. 
We visualize the final validation loss on 3 data types: HQITP (left), Obelics (middle) and DCLM (right)." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 741 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 741 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_content_list.json b/data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..309de4c1f931b65aacda08963fd28b9ed78c4cc7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_content_list.json @@ -0,0 +1,3848 @@ +[ + { + "type": "text", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "text_level": 1, + "bbox": [ + 153, + 123, + 816, + 143 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mirac Suzgun1 Mert Yuksekgonul1 Federico Bianchi2 Dan Jurafsky1 James Zou1,2", + "bbox": [ + 176, + 188, + 792, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 446, + 232, + 524, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite their impressive performance on complex tasks, current language models (LMs) typically operate in a vacuum: Each input query is processed separately, without retaining insights from previous attempts. Here, we present Dynamic Cheatsheet (DC), a lightweight framework that endows a black-box LM with a persistent, evolving memory. 
Rather than repeatedly re-discovering or re-committing the same solutions and mistakes, DC enables models to store and reuse accumulated strategies, code snippets, and general problem-solving insights at inference time. This test-time learning enhances performance substantially across a range of tasks without needing explicit ground-truth labels or human feedback. Leveraging DC, Claude 3.5 Sonnet's accuracy more than doubled on AIME math exams once it began retaining algebraic insights across questions. Similarly, GPT-4o's success rate on the Game of 24 puzzle increased from about $10\\%$ to $99\\%$ after the model discovered and reused a Python-based solution. In tasks prone to arithmetic mistakes, such as balancing equations, DC enabled GPT-4o and Claude to reach near-perfect accuracy by recalling previously validated code, whereas their baselines stagnated around $50\\%$ . Beyond arithmetic challenges, DC yields notable accuracy gains on knowledge-demanding tasks. Claude achieved a $9\\%$ improvement in GPQA-Diamond and an $8\\%$ boost on MMLU-Pro Engineering and Physics problems. Crucially, DC's memory is self-curated, focusing on concise, transferable snippets rather than entire transcripts, thereby facilitating meta-learning and avoiding context ballooning. Unlike fine-tuning or static retrieval methods, DC adapts LMs' problem-solving skills on the fly, without modifying their underlying parameters, and offers a practical approach for continuously refining responses and cutting routine errors. 
Overall, our findings present DC as a promising approach for augmenting LMs with persistent memory, bridging the divide between isolated inference events and the cumulative, experience-driven learning characteristic of human cognition.*", + "bbox": [ + 116, + 252, + 854, + 539 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/504ebbc6428ef94b18208a5e2289adf9074bf5b956cbf3a1b292575deb49ed18.jpg", + "image_caption": [ + "Figure 1: Comparison of different baselines and Dynamic Cheatsheet (DC) variants on challenging reasoning benchmarks, including AIME exams and GPQA-Diamond. Baseline represents a standard prompting approach with minimal guidance, while DC- $\\varnothing$ (a stronger baseline) contains explicit structured instructions for problem solving, as well as for Python code generation and execution, but lacks a memory component. Our proposed DC-Cu and DC-RS variants incorporate an evolving, text-based memory to enhance inference-time learning. Results (accuracy, %) demonstrate substantial improvements, with Claude 3.5 Sonnet gaining $27\\%$ on AIME 2024 and $30\\%$ on AIME 2025 under DC-Cu. In Game of 24, GPT-4o leaps from $10\\%$ (baseline) to $99\\%$ under DC-RS, reflecting its ability to retain and apply Python-based solutions efficiently. Similarly, Claude 3.5 Sonnet's accuracy more than doubles in Math Equation Solver, reaching $98\\%$ . Overall, these findings highlight the impact of test-time learning through controlled memory augmentation and efficient retrieval." 
+ ], + "image_footnote": [], + "bbox": [ + 114, + 551, + 478, + 742 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ff3eddb0ac8c9c521f6b81bc1e872f6fcce3319dd81f63ccf9e8e7f3c7dce2e3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 551, + 617, + 742 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/dd056ff42f9f749c24e9c33257f16e5ab66dfe66f9fd3b7c310d8c6d9476a377.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 551, + 859, + 742 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07952v1 [cs.LG] 10 Apr 2025", + "bbox": [ + 22, + 263, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Stanford University $^{2}$ Together AI. $\\boxtimes$ Correspondence to: msuzgun@stanford.edu and jamesz@stanford.edu. \n*We release all our data, results, and code at http://github.com/suzgunmirac/dynamic-cheatsheet.", + "bbox": [ + 107, + 876, + 774, + 904 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 86, + 83, + 217, + 99 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Modern large language models (LLMs) can tackle complex reasoning tasks, answer various questions, and generate extensive texts. Yet they still suffer from one critical limitation: once deployed, these models are fixed prior to deployment and typically retain no explicit or implicit memory of past questions, successes, or mistakes during inference. They approach each new problem de novo, often re-deriving the same insights—and re-committing the same errors. 
In contrast, human cognition stands on a foundation of incremental learning, continuously internalizing new experiences and solutions into a persistent mental model.", + "bbox": [ + 84, + 109, + 475, + 275 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we present Dynamic Cheatsheet (DC), a simple and intuitive framework that endows black-box LLMs with a persistent, evolving memory at inference time. Rather than fine-tuning weights (for instance, through dynamic evaluation (Krause et al., 2019) or domain adaptation (Gururangan et al., 2020)) or retrieving facts from a massive static corpus (as in traditional retrieval-augmented generation systems (Guu et al., 2020; Zhang et al., 2024b)), DC dynamically curates a compact library of reusable strategies, solution sketches, and code snippets. Either before or after each query, DC enables the system to decide which lessons to store, what to discard, and how to refine existing entries—thus effectively \"learning\" from successes and failures. It is a flexible online-learning approach that enables a black-box LLM to improve itself without needing any explicit ground truth labels or human feedback.", + "bbox": [ + 84, + 282, + 475, + 525 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The overall workflow of DC is intuitive and compelling. In one version of DC (DC-Cu.), when presented with a new query, the LM first consults its external memory to see if any prior insights, strategies or relevant model solutions have been stored. It then proposes a solution by combining the retrieved insights with its own internal reasoning capabilities. Upon generating an answer, it then proceeds to a curation phase that updates the memory: If the approach seems to be correct, useful, or practical, DC codifies it in its memory for future use; if an error surfaces, DC may revise or prune faulty heuristics. 
This all happens without gradient-based parameter updates, so computational overhead remains modest, and compatibility with black-box APIs (e.g., GPT-4 or Claude) is fully preserved. See Figure 4.", + "bbox": [ + 84, + 532, + 475, + 743 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We tested DC across multiple challenging benchmarks and observed that it increases performance and reduces repetitive mistakes. On AIME 2024, Claude 3.5 Sonnet jumped from $23\\%$ to $50\\%$ accuracy, more than doubling its baseline score, by retaining algebraic and combinatorial insights. Likewise, it gained $30\\%$ accuracy on AIME 2025. Notably, these improvements hold in knowledge-intensive tasks as well. On GPQA-Diamond, which tests specialized domain questions, DC lifted Claude by over $9\\%$ . In MMLU-Pro Engineering and Physics, it provided up to an $8\\%$ boost in", + "bbox": [ + 84, + 750, + 475, + 902 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6ec5d4b82fc2fec50289e6f56bacca63d933fcf0c48267f64fa133ea1a802676.jpg", + "image_caption": [ + "Figure 2: Overall task performance of Claude 3.5 Sonnet under the baseline prompting approach with minimal instructions (BL) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS)." + ], + "image_footnote": [], + "bbox": [ + 503, + 84, + 879, + 314 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "performance by allowing the model to maintain a \" toolkit\" of formulas and general problem-solving patterns.", + "bbox": [ + 496, + 383, + 888, + 416 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "An even more striking and compelling example is the Game of 24, a puzzle that requires the solver to combine four digits into an arithmetic expression equaling 24. GPT-4o's baseline performance (10%) increased to 99% under DC. Early in the test sequence, the model discovered that an efficient Python brute-force solver eliminated all manual guesswork. 
Once this snippet was stored, GPT-4o simply retrieved it for subsequent queries, avoiding manual arithmetic entirely. We saw a similar pattern in Math Equation Balancer, where GPT-4o and Claude soared from 45-50% to 98-100% by \"recalling\" a straightforward code-based approach instead of manually fumbling with numeric manipulations.", + "bbox": [ + 495, + 422, + 887, + 604 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Nonetheless, DC is not a panacea. We found that smaller models, such as GPT-4o-mini, benefit from DC in limited amounts. These models generate too few correct solutions in these challenging tasks in the first place, leaving the memory populated with flawed or incomplete strategies. Worse, they struggle to refine stored content. DC can amplify the strengths of models that can already produce high-quality outputs, but not fix foundational gaps in reasoning.", + "bbox": [ + 495, + 611, + 887, + 733 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We also note that DC differs from naive \"append the entire conversation history\" in-context learning approaches. Under DC, memory is carefully curated, focusing on succinct, useful, and transferable knowledge over raw transcripts. This prevents ballooning context lengths (Liu et al., 2024a) and helps ensure that repeated retrieval remains tractable. Indeed, part of DC's contribution is in formalizing a mechanism for selective, evolving retention—storing just enough to solve the next set of tasks without drowning in an ever-growing text buffer. Cf. 
(Karpicke & Roediger III, 2008; Roediger & Butler, 2011; Karpicke & Blunt, 2011)", + "bbox": [ + 495, + 739, + 888, + 905 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/3cf3983f23c859e88d5921b3f4d9b8fd44874ef98f78cea1bad717f13efae90a.jpg", + "image_caption": [ + "Figure 3: Algorithmic illustration of the Dynamic Cheatsheet (DC)-based approaches and other baseline methods. Here, Gen represents the solution generator model, Cur the memory curator, and Retr the retriever. While we use the same black-box LLMs for both generation and curation, we differentiate their roles via task-agnostic instructions (prompts). The retrieval mechanism ranks historical inputs based on cosine similarity with the current query, selecting the most relevant past examples along with their generated solutions." + ], + "image_footnote": [], + "bbox": [ + 86, + 80, + 352, + 215 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c0718f44a9608275d94210fa91cb6ad2b96b9bc7c811b2ceda617f99ef271065.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 359, + 80, + 616, + 215 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5d4189c7af6f4ad1886d1376f3f1be4e220294b63d135fd02fa2b265f1888f25.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 627, + 80, + 885, + 215 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Dynamic Cheatsheet (DC) Methodology", + "text_level": 1, + "bbox": [ + 84, + 286, + 444, + 304 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "DC, in its core, includes an external, non-parametric memory that evolves in tandem with the LLM's inference process. 
Rather than fine-tuning the underlying weights, DC tracks successes and failures of the model at test time, then selectively stores heuristics, strategies, or short textual artifacts that can guide the LLM in future instances. Notably, this approach respects the black-box nature of many commercial LLM APIs: no gradient-based updates are required, and the model's core parameters remain untouched.", + "bbox": [ + 84, + 311, + 475, + 446 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1. DC: Building Blocks and Iterative Loop", + "text_level": 1, + "bbox": [ + 84, + 464, + 397, + 479 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The DC framework consists of two core modules: generation and curation. Both modules can easily operate on top of the same LM (prompted differently) or on separate LMs.", + "bbox": [ + 84, + 488, + 475, + 532 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1.1. Solution Generation with Memory", + "text_level": 1, + "bbox": [ + 84, + 547, + 370, + 563 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Let's consider a sequence of inputs $(x_{1},x_{2},\\ldots ,x_{n})$ , where each $x_{i}\\sim \\mathcal{D}_{\\mathrm{test}}$ indicates a new query or problem posed to the model sampled from the same distribution $\\mathcal{D}_{\\mathrm{test}}$ (a typical setting in online learning). The distribution $\\mathcal{D}_{\\mathrm{test}}$ is unknown to us. At the $i$ -th step, the model is provided with both the new query $x_{i}$ and the current memory state $M_{i}$ which captures knowledge gleaned from previous successes and failures. 
We denote the solution generator by Gen:", + "bbox": [ + 84, + 571, + 473, + 691 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {y} _ {i} = \\operatorname {G e n} \\left(x _ {i}, M _ {i}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 700, + 473, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here, $\\tilde{y}_i$ is the candidate solution produced by the model. $M_{i}$ helps condition the model to reuse or adapt previously stored solutions, insights, techniques, or heuristics.", + "bbox": [ + 84, + 723, + 475, + 768 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1.2. Memory Curation Step", + "text_level": 1, + "bbox": [ + 84, + 782, + 294, + 799 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "After the generator produces its answer $\\tilde{y}_i$ to $x_i$ , the curator, Cur, updates the current content of the memory:", + "bbox": [ + 84, + 806, + 475, + 837 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nM _ {i + 1} = \\operatorname {C u r} \\left(M _ {i}, x _ {i}, \\tilde {y} _ {i}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 844, + 473, + 861 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "During memory curation, Cur mainly considers: (i) the usefulness and generalizability of the newly produced answer", + "bbox": [ + 83, + 875, + 475, + 905 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "(i.e., if $\\tilde{y}_i$ is correct or provides valuable and generalizable insights, it is distilled into a form suitable for later reference), (ii) refinement or removal of existing memory entries (i.e., if an existing memory entry was incorrect or superseded by a more efficient or versatile strategy, Cur may remove or update it), and (iii) clarity and compactness of the entire memory (i.e., memory entries are consolidated to retain succinct, high-impact references and heuristics).", + "bbox": [ + 496, + 287, + 
888, + 409 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/92f9a34273fa84fe1598a46a5bfd6c72fab0f2c94d9475ea668cfc3b4a151d44.jpg", + "image_caption": [ + "Figure 4: Illustration of Dynamic Cheatsheet (DC-Cu variant)." + ], + "image_footnote": [], + "bbox": [ + 503, + 421, + 879, + 547 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Cur does not have access to ground-truth labels; so, it has to assess the correctness and efficiency of the solutions by itself before updating the memory. In our experiments, we instruct a single model to perform this crucial step. Yet, in practice, Cur can be implemented as a series of steps that instruct multiple tools and models, through different prompts, to verify the validity and efficiency of the solution and to transform the raw solution text into even more generalizable, reliable, and efficient strategies, insights, and code snippets.", + "bbox": [ + 496, + 571, + 887, + 708 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We refer to this version of DC above as DC-Cu (short for DC-Cumulative). Under DC-Cu, the system first performs solution generation based on the current memory (Eqn. 1) and then updates the memory (Eqn. 2), by cumulatively expanding and refining the memory items thus far. Unlike DC-RS, which is discussed in the next part, DC-Cu, does not contain a retrieval component, however.", + "bbox": [ + 496, + 714, + 885, + 820 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. DC with Retrieval & Synthesis (DC-RS)", + "text_level": 1, + "bbox": [ + 496, + 835, + 808, + 852 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "DC-Cu has two potential drawbacks. First, it updates the memory after processing an input query, rather than refining it before generating a response. 
This means the model lacks", + "bbox": [ + 496, + 859, + 885, + 905 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 694, + 71 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the opportunity to incorporate new insights from the current query while reasoning through its solution. Second, DC-Cu does not store or revisit past input-output pairs unless explicitly retained in memory. This omission prevents the model from directly retrieving and leveraging historical responses, which can be particularly valuable in benchmarks covering diverse topics or domains (e.g., GPQA-Diamond).", + "bbox": [ + 84, + 84, + 475, + 191 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address these issues, DC-RS modifies the sequence of memory updates and introduces a retrieval mechanism, Retr, into the curation process. Retr allows the model to retrieve the most relevant past input-output pairs from its knowledge base. By refining the memory before responding and retrieving prior cases when needed, DC-RS enhances the model's adaptability and reasoning efficiency.", + "bbox": [ + 84, + 198, + 473, + 303 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "DC-RS first retrieves $^{1}$ top- $k$ most similar inputs, along with their model-generated outputs, from previously seen examples, which we denote by $R_{i}^{(k)}$ (or simply $R_{i}$ ).2 It then passes these select examples, $R_{i}$ , along with the most recent memory content, $M_{i-1}$ , to the curator to update the memory, that is to get $M_{i}$ . Finally, it uses the generator to produce $\\tilde{y}_{i}$ , given $x_{i}$ and $M_{i}$ . 
We summarize all these steps below:", + "bbox": [ + 84, + 310, + 475, + 419 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nR _ {i} = \\operatorname {R e t r} \\left(x _ {i}, \\left\\{\\left(x _ {j}, \\tilde {y} _ {j}\\right) \\right\\} _ {j < i}, k\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 431, + 473, + 449 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nM _ {i} = \\operatorname {C u r} \\left(M _ {i - 1}, x _ {i}, R _ {i}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 450, + 473, + 467 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {y} _ {i} = \\operatorname {G e n} \\left(x _ {i}, M _ {i}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 469, + 473, + 486 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3. Baselines", + "text_level": 1, + "bbox": [ + 84, + 505, + 184, + 518 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To quantify the efficacy of memory-driven test-time learning, we compare DC and its variants to four baselines:", + "bbox": [ + 84, + 527, + 475, + 558 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Baseline prompting (BL). This plain \"vanilla\" prompting approach, with minimal instructions, simply prompts the model without any iterative memory or retrieval mechanism. It reflects traditional one-off inference. $^3$", + "(2) DC- $\\varnothing$ (empty memory). To isolate the effect of memory curation, this DC baseline always keeps the memory content effectively empty. $^4$ DC- $\\varnothing$ allows us to measure how much performance improvement arises purely from storing and reusing knowledge over time. While there is no continuous knowledge storage or strategy reuse, this method follows the instructions in Figure 13 and is therefore a strong baseline." 
+ ], + "bbox": [ + 84, + 566, + 475, + 739 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(3) Full-History Appending (FH). This is a naive approach that appends the entire conversation history to the model input without any curation or truncation. FH can exceed context-window limits and include redundant or low-value information, but nonetheless, it provides a useful comparison for methods that actively curate content.", + "(4) Dynamic Retrieval (DR). A final baseline uses retrieval but no curation. Specifically, for each new query, it retrieves the most similar past interactions and directly pastes them, verbatim, into the prompt. DR can help the model see relevant input-output pairs but not directly codify any abstract or generalized solutions.7" + ], + "bbox": [ + 496, + 84, + 885, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 3 (above) contains pseudocodes of all the primary methods and baselines considered in this paper.", + "bbox": [ + 496, + 281, + 885, + 311 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Experimental Setup", + "text_level": 1, + "bbox": [ + 496, + 330, + 689, + 348 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Tasks and Datasets", + "text_level": 1, + "bbox": [ + 496, + 356, + 663, + 369 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To rigorously evaluate DC's effectiveness, we focus on challenging tasks where contemporary state-of-the-art LLMs, such as GPT-4o and Claude 3.5, still face limitations. Rather than evaluating on benchmarks where performance is near saturation (e.g., BBH (Suzgun et al., 2023b), MGSM (Shi et al., 2023), GSM8K (Cobbe et al., 2021)), we prioritize tasks that demand multi-step reasoning, heuristic search, strategic adaptation, and cumulative learning—that is, tasks in which iterative memory refinement can yield tangible improvements over time. 
$^{8}$", + "bbox": [ + 495, + 378, + 887, + 531 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Overall, the selected datasets include algorithmic, logical, and domain-specific reasoning tasks, each chosen to stress-test the model's ability to refine its reasoning over time.", + "bbox": [ + 495, + 537, + 887, + 584 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(a) AIME 2020-2025 Exam Questions: The American Invitational Mathematics Examination (AIME) is a prestigious high-school competition featuring complex problems across algebra, combinatorics, number theory, geometry, and probability. These questions require deep mathematical reasoning and multi-step problem-solving. We consider three subsets: AIME $2024^{9}$ (30 questions), AIME $2025^{10}$ (30 questions), and AIME $2020 - 2024^{11}$ (133 questions).", + "bbox": [ + 495, + 590, + 887, + 712 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 694, + 70 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1We used OpenAI's text-embedding-3-small model to map input queries (raw questions) to embedding vectors.", + "bbox": [ + 84, + 748, + 472, + 775 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "2We set $k$ to 3 in all our experiments. (Initially, we considered higher top- $k$ values such as 5 and 7, but the gain was insignificant.)", + "bbox": [ + 84, + 775, + 472, + 801 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "3Please refer to Figure 12 to see the full instruction (prompt) used in BLh. We experimented with the zero-shot CoT approach (Kojima et al., 2022) in our preliminary experiments, but it did not yield any gains (Arcuschin et al., 2025). 
We, therefore, did not include it as a baseline method in our experiments.", + "bbox": [ + 84, + 801, + 473, + 866 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "4We adopt the generator prompt template used in DC-RS, namely Figure 13, for DC- $\\emptyset$ , though we replace the memory placeholder with the text \"empty cheatsheet).", + "bbox": [ + 84, + 866, + 475, + 904 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "5We consider and test this baseline only on AIME 2024 and AIME 2025, which are relatively small in their size (each contains 30 examples) compared to other benchmarks.", + "bbox": [ + 495, + 720, + 885, + 760 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "6We use the generator prompt template in Figure 13 again, but include the entire raw input-output pairs from the previous steps in the memory—without any curation, truncation, or synthesis.", + "bbox": [ + 495, + 760, + 883, + 799 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "$^{7}\\mathrm{FH}$ is similar to DR, but we include only a select (most relevant) input-output pairs in the memory content.", + "bbox": [ + 495, + 799, + 887, + 825 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "8We release all the original input-output pairs in our codebase: http://github.com/suzgunmirac/dynamic-cheatsheet.", + "bbox": [ + 495, + 825, + 885, + 852 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "$^{9}$ huggingface.co/datasets/HuggingFaceH4/aime_2024", + "bbox": [ + 519, + 852, + 874, + 864 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "10huggingface.co/datasets/yentinglin/aime_2025.", + "bbox": [ + 519, + 866, + 854, + 878 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "11huggingface.co/datasets/di-zhang-fdu/AIME_1983_2024.", + "bbox": [ + 519, + 878, + 903, + 893 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 
480, + 922, + 491, + 934 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(b) GPQA-Diamond (Rein et al., 2024): A high-quality, difficult subset of the Graduate-Level Google-Proof Q&A (GPQA) benchmark, GPQA-Diamond contains 198 expert-validated questions across natural sciences, including biology, chemistry, and physics. These questions were correctly answered by domain experts but often missed by non-experts, making them ideal for evaluating DC's ability to handle complex, multi-hop reasoning tasks.", + "bbox": [ + 84, + 84, + 475, + 205 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(c) Game of 24 (Yao et al., 2023; Suzgun & Kalai, 2024): A heuristic-driven arithmetic challenge where the objective is to form an expression that evaluates to 24 using four given numbers exactly once. For instance, if the input values were \"7 7 8 11,\" one valid answer would be \"8*(7+7-11).\" This task emphasizes systematic search, strategic reasoning, and pattern recognition. We use the 100 examples from (Suzgun & Kalai, 2024) to assess DC's capacity for refining computational heuristics and strategy over manual attempts.", + "bbox": [ + 84, + 212, + 475, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(d) Math Equation Balancer: Focused on elementary arithmetic reasoning, this dataset requires the model to complete equations by inserting the appropriate operators to form valid expressions. The task emphasizes the sequential placement of operators, as illustrated by the example “1 ? 2 ? 3 = 6,” where the model must identify the correct operators to satisfy the equation (“1 + 2 + 3 = 6” or “1 * 2 * 3 = 6”). We compiled 250 arithmetic expressions for this task.", + "bbox": [ + 84, + 356, + 475, + 477 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(e) MMLU-Pro (Engineering and Physics) (Wang et al., 2024b): A professional-level subset of the MMLU benchmark focused on physics and engineering. 
All questions are presented in a multiple-choice form. The original dataset contains 1,299 physics and 969 engineering questions. We sampled 250 questions from each subset.", + "bbox": [ + 84, + 484, + 475, + 575 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. Language Models", + "text_level": 1, + "bbox": [ + 86, + 592, + 243, + 607 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate the efficacy of DC across a range of language models. Our selection includes both state-of-the-art LLMs such as GPT-4o and Claude 3.5 Sonnet and their smaller-scale counterparts (namely, GPT-4o-mini and Claude 3.5 Haiku), as well as models such as DeepSeek R1 that are designed specifically for reasoning-intensive tasks.", + "bbox": [ + 84, + 614, + 475, + 705 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Evaluation Protocol", + "text_level": 1, + "bbox": [ + 86, + 723, + 259, + 737 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To ensure standardized and reliable evaluation, all models are instructed to format their final answers in a structured, machine-readable format. All model answers are expected to be wrapped in the following XML-style tags:", + "bbox": [ + 84, + 746, + 475, + 806 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " (final answer) ", + "guess_lang": "txt", + "bbox": [ + 117, + 811, + 235, + 854 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This explicit format ensures accurate and consistent parsing, eliminating errors arising from extraneous text or ambiguous outputs. Once extracted, the final answers are evaluated", + "bbox": [ + 84, + 859, + 475, + 905 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "using their corresponding task-specific accuracy metric.", + "bbox": [ + 496, + 84, + 864, + 99 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.1. 
Accuracy Metrics", + "text_level": 1, + "bbox": [ + 496, + 114, + 668, + 128 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given the diversity of the tasks, we use different accuracy metrics tailored to the specific requirements of each dataset.", + "bbox": [ + 496, + 138, + 885, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Soft Match (SM) is a lenient metric that considers an answer correct if it matches the ground truth after ignoring minor formatting differences, such as punctuation or whitespace variations. We apply this metric to GPQA-Diamond, and MMLU Pro (Engineering and Physics), in which questions are presented in a multiple-choice format.", + "bbox": [ + 496, + 175, + 887, + 267 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Functionally Correct (FC) is an even more flexible metric that evaluates whether the model's output satisfies the task-specific constraints, even if the exact numeral presentation or formatting differs slightly from the reference solution. We apply this metric to the Game of 24, Math Equation Balancer, and AIME benchmarks.", + "bbox": [ + 496, + 273, + 887, + 364 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Main Results", + "text_level": 1, + "bbox": [ + 496, + 383, + 633, + 400 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. DC enables test-time learning and reduces repetitive errors", + "text_level": 1, + "bbox": [ + 496, + 410, + 823, + 440 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "One of the most compelling illustrations of DC's capabilities emerges from the Game of 24 task. As seen in Table 1, GPT-4o's baseline accuracy on this arithmetic puzzle was just $10\\%$ . Under DC-RS, its performance increased to $99\\%$ , illustrating DC's capacity for test-time learning and iterative refinement. 
Early in the task sequence, GPT-4o discovered a reliable, Python-based brute-force method to solve Game of 24 and later on recognized the repetitive structure of the problem. The model then encoded this approach into its memory. Once established, GPT-4o consistently retrieved and applied the more or less same Python solution for subsequent examples, leading to rapid and accurate results.", + "bbox": [ + 495, + 448, + 887, + 630 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The performance under DC- $\\varnothing$ (19%) further highlights the positive impact of memory curation and retrieval. DC- $\\varnothing$ uses the same core generator but keeps the memory empty, thus lacking the mechanism to store and reuse solutions. The large gap between 19% (DC- $\\varnothing$ ) and 99% (DC-RS) confirms that effective memory usage, in which past solutions are retrieved and generalized, is the main driver of GPT-4o's transformation from ad-hoc solver to near-perfect performer in Game of 24.", + "bbox": [ + 495, + 637, + 885, + 772 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In contrast, Claude 3.5 Sonnet showed marginal gain, moving from $12\\%$ to $14\\%$ . Despite DC's scaffolding, Claude did not internalize a generalized approach but instead continued to rely on manual arithmetic solutions. 
This underscores that while DC provides the framework for test-time adaptation, its ultimate success hinges on the model's innate capacity to identify and encode robust, reusable strategies.", + "bbox": [ + 495, + 780, + 887, + 887 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/b2dd0d0d80362dd89fd243809f6becce8148dccb76a5cfdb8c154a007b7f43d8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TasksClaude 3.5 SonnetGPT-4o
BLDC-∅DRDC-Cu.DC-RSBLDC-∅DRDC-Cu.DC-RS
AIME 202423.336.743.350.046.720.036.726.736.740.0
AIME 20256.723.323.336.730.06.710.010.016.720.0
AIME 2020–246.730.139.138.440.69.824.124.120.324.8
Game of 2412.010.011.014.014.010.019.06.093.099.0
GPQA Diamond59.660.163.661.168.757.157.155.158.157.1
Math Eqn. Balancer44.856.460.410097.850.088.010010099.2
MMLU Pro Eng.61.257.265.266.867.653.251.648.844.051.2
MMLU Pro Physics74.075.680.477.682.075.670.875.670.475.2
", + "bbox": [ + 117, + 80, + 854, + 253 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1: Performance comparison of Dynamic Cheatsheet (DC) variants for Claude 3.5 Sonnet and GPT-4o across multiple benchmarks. BL (Baseline): standard inference without memory; DC-∅ (Empty Memory): includes structured problem-solving and explicit tool-use instructions but no memory retention mechanism; DR (Dynamic Retrieval): uses retrieval but lacks curated memory updates; DC-Cu (Cumulative Memory): iteratively accumulates model solutions but lacks retrieval; and DC-RS (Retrieval & Synthesis): combines retrieval with memory refinement/synthesis. These results highlight substantial accuracy gains under DC: Claude 3.5 Sonnet's AIME 2024 accuracy jumps by $27\\%$ under DC-Cu, and GPT-4o's Game of 24 accuracy leaps from $10\\%$ to $99\\%$ under DC-RS.", + "bbox": [ + 83, + 265, + 887, + 342 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. DC provides substantial improvements across various challenging reasoning benchmarks", + "text_level": 1, + "bbox": [ + 84, + 351, + 436, + 382 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Beyond Game of 24, DC yielded significant gains across a range of complex mathematical and algorithmic tasks. See Table 1. The results below illustrate how iterative solution reuse can helpful in complex reasoning problems.", + "bbox": [ + 84, + 390, + 473, + 450 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "AIME Exam Problems. The AIME exams provided some of the most dramatic improvements under DC. For Claude 3.5 Sonnet, performance on AIME 2020-2024 surged from $6.7\\%$ to $40.6\\%$ under DC-RS. A similar upward trend appeared on AIME 2024 (23.3% to $50.0\\%$ ) and AIME 2025 (6.7% to $36.7\\%$ ) under DC-Cu. 
DC-Cu, where the model curates memory after processing the input and does not involve a retrieval stage, also proved potent in recent exam sets, achieving highest accuracy scores in AIME 2024 and 2025. GPT-4o also showed some noteworthy gains. Its AIME 2024 performance raised from $20.0\\%$ to $40.0\\%$ under DC-RS, while its AIME 2025 score climbed from $6.7\\%$ to $20.0\\%$ . These boosts suggest that structured test-time-produced memory can help tackle difficult math problems.", + "bbox": [ + 84, + 458, + 475, + 670 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "GPQA-Diamond. On GPQA-Diamond, Claude 3.5 Sonnet improved from $59.6\\%$ to $68.7\\%$ under DC-RS, a robust $9.1\\%$ gain purely from test-time adaptation. DR $(63.6\\%)$ demonstrated that retrieval alone helps, but the further jump to $68.7\\%$ highlights how memory curation and synthesis can yield additional benefits. By contrast, GPT-4o experienced only a slight increase from $57.1\\%$ to $58.1\\%$ with DC-RS; our quantitative analysis of the model's outputs and memory showed us that retrieval can, in some cases, introduce confusion, especially if suboptimal examples are recalled. This contrast between different models underscores how the success of retrieval-based adaptation partly depends on model-specific generation and curation capabilities.", + "bbox": [ + 84, + 676, + 475, + 875 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Math Equation Balancer. As Table 1 shows, the base-", + "bbox": [ + 84, + 880, + 475, + 896 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "line performance for Claude 3.5 Sonnet (44.8%) rose to $98 - 100\\%$ with DC-RS and DC-Cu, while GPT-4o similarly improved from $50.0\\%$ to near-perfect accuracy (99-100%). 
As observed in Game of 24, the models quickly learned an algorithmic or Python-based balancing routine, stored it in external memory, and repeatedly retrieved it, achieving exceptional consistency once the core method was established.", + "bbox": [ + 495, + 351, + 887, + 458 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "MMLU-Pro Tasks. For MMLU-Pro Eng. and Physics, Claude 3.5 Sonnet exhibited consistent gains, rising by up to $8.0\\%$ in Physics (from $74\\%$ to $82\\%$ ). Our examination of the curated memory entries shows that Claude temporarily stored and retrieved compact \"reference guides\" on engineering and physics principles, which might have proved beneficial for thematically similar questions. GPT-4o, on the other hand, observed slight decreases from the baseline on these tasks, suggesting that domain complexity and baseline knowledge gaps may attenuate DC's benefits if curated memory is less reliable or consistent.", + "bbox": [ + 495, + 464, + 888, + 630 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Memory curation (DC) fosters generalization and provides gains over full-history-appending (FH)", + "text_level": 1, + "bbox": [ + 496, + 647, + 875, + 679 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Whereas FH (full-history) simply appends every previous dialogue turn into the prompt, DC actively filters and synthesizes high-value content. As shown in Table 2, Sonnet under FH reached $26.7\\%$ accuracy in 2024 questions, while DC-based methods hit $50.0\\%$ . Similarly, GPT-4o managed a baseline of $20.0\\%$ but fell to $6.7\\%$ using FH, in direct contrast to $40.0\\%$ with DC-RS. Excessive uncurated input-output pairs can not only overwhelm the model's context window, dilute crucial insights and hamper retrieval efficiency, but also significantly increase inference costs over time. 
On the other hand, DC's selective memory curation ensures that problem-solving tips or code snippets remain readily accessible without clutter, thus facilitating more robust and consistent improvements across consecutive queries.", + "bbox": [ + 495, + 686, + 888, + 898 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 71 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/965700a9c72784f9f8a2105c6bbe3acde1bc883831ff2ac484f5b48008c4be46.jpg", + "image_caption": [ + "Figure 5: Excerpt from GPT-4o's external memory after processing 100 examples from Game of 24 under DC-RS. Early in the test sequence, the model discovered a Python-based brute-force solution, stored it, and subsequently retrieved it for subsequent puzzles. This shift to structured code reuse resulted in a dramatic performance increase from $10\\%$ to $99\\%$ accuracy, eliminating arithmetic errors and redundant problem-solving efforts." + ], + "image_footnote": [], + "bbox": [ + 94, + 88, + 462, + 473 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. DC fosters efficient tool usage / code generation", + "text_level": 1, + "bbox": [ + 84, + 603, + 447, + 618 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A successful behavior under DC is the LLMs' inclination toward code generation to handle computationally intensive tasks. GPT-4o's near-complete reliance on Python scripts for Game of 24 exemplifies this shift. Rather than performing manual arithmetic repeatedly, GPT-4o recognized that code-based brute force is more systematic. 
It generated, stored, and iteratively refined a Python function that tested permutations of numbers and operations, allowing it to solve each instance of Game of 24 with high accuracy.", + "bbox": [ + 84, + 626, + 475, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This inclination toward automation illustrates DC's potential to nurture efficient tool-usage: the capacity to recognize when external tools (e.g., Python, symbolic math engines, or dedicated solvers) are more robust than internally verbalized chain-of-thought calculations. While we restricted the scope of tool usage to Python interpreter in this study, future expansions could easily explore a broader suite of tools, potentially amplifying LLM performance in specialized domains such as computational biology or legal research.", + "bbox": [ + 84, + 768, + 478, + 906 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4c761fb056c52ac2dda0586b9ecf8f17216ffcf9839d5398281625731563d48e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TasksClaude 3.5 SonnetGPT-4o
BLFHDC-Cu.BLFHDC-RS
AIME 202423.326.750.020.013.340.0
AIME 20256.76.736.76.73.320.0
", + "bbox": [ + 501, + 80, + 883, + 156 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Performance breakdown of BL (default baseline), FH (full history), DC-Cu, and DC-RS approaches under AIME 2024 and 2025. FH stores all past queries and outputs, while DC-Cu and DC-RS selectively refine stored memory. Results indicate that targeted memory curation in DC-RS leads to greater accuracy gains compared to full history retention, supporting the need for structured, self-updating knowledge mechanisms.", + "bbox": [ + 496, + 165, + 885, + 255 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Model scale and capacity impact DC effectiveness", + "text_level": 1, + "bbox": [ + 496, + 266, + 877, + 282 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our current results indicate that the effectiveness of DC is strongly tied to the model's scale and underlying generative capacity. While Claude 3.5 Sonnet and GPT-4o showed notable gains across multiple tasks under DC, their smaller counterparts, Claude 3.5 Haiku and GPT-4o-mini, showed more limited and inconsistent gains.", + "bbox": [ + 495, + 289, + 885, + 381 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3, for instance, shows that Claude 3.5 Haiku achieved moderate gains under DC, with its accuracy on AIME 2024 rising from $10.0\\%$ (baseline) to $36.7\\%$ under DC-Cu. But gains on AIME 2025 were weaker, reaching only $13.3\\%$ under DC- $\\varnothing$ and DC-Cu. 
Interestingly, GPQA-Diamond saw an improvement from $43.4\\%$ to $49.0\\%$ under DC-RS,", + "bbox": [ + 495, + 387, + 888, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "GENERAL META-REASONING STRATEGIES", + "text_level": 1, + "bbox": [ + 509, + 512, + 728, + 523 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "", + "bbox": [ + 511, + 526, + 596, + 536 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "", + "bbox": [ + 511, + 542, + 583, + 554 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Systematic Problem Analysis Framework (Reference: Q1-Q20)", + "bbox": [ + 511, + 555, + 807, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For complex mathematical problems:", + "bbox": [ + 511, + 566, + 694, + 577 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. State problem requirements clearly", + "2. List key observations and theorems applicable", + "3. Identify patterns and relationships", + "4. Break into manageable sub-problems", + "5. Verify against examples", + "6. Consider computational approach when analytical solution is complex", + "7. For grid problems, analyze movement patterns and symmetries", + "8. For combinatorial problems, use appropriate counting techniques", + "9. Implement verification code when possible", + "10. Consider edge cases and constraints", + "11. For grid coloring problems, consider row/column patterns", + "" + ], + "bbox": [ + 511, + 578, + 862, + 715 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "", + "bbox": [ + 511, + 720, + 571, + 732 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Example application:", + "bbox": [ + 511, + 733, + 619, + 744 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Requirements: list all given conditions", + "2. Observations: identify applicable theorems", + "3. Patterns: look for structural relationships", + "4. 
Sub-problems: break into steps", + "5. Verification: test against examples", + "6. Implementation: use Python for verification", + "" + ], + "bbox": [ + 511, + 744, + 736, + 824 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "", + "bbox": [ + 511, + 830, + 601, + 840 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Count: 20", + "bbox": [ + 511, + 840, + 562, + 851 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 6: Example of Claude 3.5 Sonnet's curated memory after processing 20 AIME 2024 questions under DC-Cu. The memory captures key solution strategies, enables the model to generalize across similar computational problems, and boosts its accuracy.", + "bbox": [ + 496, + 867, + 885, + 919 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 694, + 71 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/929c40871840b64eea8a65bb6e6edf2caf541d593b184d1645fa4b7013b9c21a.jpg", + "image_caption": [ + "Figure 7: Cumulative performance progression under DC for GPQA-Diamond (left) and Game of 24 (right). In GPQA-Diamond, Claude 3.5 Sonnet steadily improves as it accumulates relevant knowledge snippets (the first few points are noisy because $y$ measures cumulative accuracy). Meanwhile, in Game of 24, GPT-4o rapidly transitions from trial-and-error arithmetic to near-perfect performance once it recognizes and stores a Python-based solution. These trends highlight DC's ability to enhance accuracy via iterative test-time learning." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 84, + 480, + 262 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0410e0cc4f37998875e3a12d1df04dd9e6a45d2a2ddd8cacbd54e80f3efd76b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 83, + 875, + 263 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "suggesting that retrieval-based adaptation might still provide utility in smaller models.", + "bbox": [ + 84, + 332, + 473, + 362 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9d6affcf252b705e810fdd65349c4205d86943abc5b929094e9861ab0e1c47f1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TasksClaude 3.5 Haiku
BLDC-∅DC-Cu.DC-RS
AIME 202410.026.736.730.0
AIME 20250.013.313.310.0
GPQA-Diamond43.441.943.749.0
TasksGPT-4o-mini
BLDC-∅DC-Cu.DC-RS
AIME 202416.720.013.313.3
AIME 202510.013.313.316.7
GPQA-Diamond34.334.333.832.3
", + "bbox": [ + 106, + 378, + 454, + 559 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3: Performance of Claude 3.5 Haiku and GPT-4o-mini, the smaller counterparts of Claude 3.5 Sonnet and GPT-4o, across AIME (2024, 2025) and GPQA-Diamond. These smaller models struggle to fully leverage DC, suggesting that memory-based adaptation is most effective when the base LM has sufficient generative competence. Performance improvements are more muted, highlighting the dependency of DC on model-scale reasoning ability.", + "bbox": [ + 84, + 566, + 473, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "That said, GPT-4o-mini (Table 3) showed even smaller gains, with some variants leading to slight declines in performance. On AIME 2024, DC- $\\varnothing$ provided a $20.0\\%$ boost, but both DC-Cu and DC-RS performed worse than baseline. AIME 2025 showed a minor improvement, peaking at $16.7\\%$ under DC-RS. On GPQA-Diamond, GPT-4o-mini's performance, however, remained largely stagnant or slightly declined under memory-based adaptation, suggesting that it struggled to leverage stored information effectively.", + "bbox": [ + 84, + 662, + 475, + 800 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "These imply two drawbacks of smaller models under DC:", + "bbox": [ + 84, + 806, + 467, + 821 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(a) Generative competence. For DC to be effective, the base model must produce correct solutions with sufficient frequency to populate the memory with high-quality, reusable strategies. Smaller models, such as GPT-4o-mini and Claude 3.5 Haiku, generate correct solutions less reliably,", + "bbox": [ + 84, + 830, + 475, + 905 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "leading to a sparse or low-quality memory repository. 
As a result, iterative refinement stalls because the stored knowledge consists mostly of incorrect or partial attempts.", + "bbox": [ + 496, + 332, + 887, + 378 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(b) Contextual and memory curation limitations. Smaller models struggle with long-context understanding/generation and memory retrieval, leading to inefficient or irrelevant memory usage. Unlike their larger counterparts, which can more effectively retrieve and synthesize solutions from stored heuristics, smaller models often fail to retrieve the most relevant past solutions or misapply retrieved knowledge to new problems. This results in inconsistent performance under DC-RS, particularly in tasks requiring complex reasoning or strategic adaptation.", + "bbox": [ + 496, + 383, + 888, + 536 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.6. Test-time task similarity and example ordering can amplify DC's overall impact", + "text_level": 1, + "bbox": [ + 496, + 551, + 885, + 583 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Another central insight is that DC thrives when test examples share structural similarities. In both Game of 24 and Math Equation Balancer, once GPT-4o identified an efficient solution, it reused it consistently for subsequent tasks. Similarly, in AIME, discovering a geometry or combinatorics strategy allowed for easy transfer across questions of analogous structure. Consequently, tasks arranged to present related questions early may accelerate and improve the model's test-time learning. This suggests that curriculum-style learning (Bengio et al., 2009), where simpler or archetypal problems are presented first to build a repository of valid heuristics, may potentially bootstrap performance. Cf. (Lopez-Paz & Ranzato, 2017; Zelikman et al., 2022; Chen et al., 2024)", + "bbox": [ + 495, + 590, + 888, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 
Additional Analyses and Discussions", + "text_level": 1, + "bbox": [ + 496, + 821, + 828, + 839 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reasoning and information efficiency. One key insight is that DC reduces the need to \"reinvent the wheel\" for each query. By encoding and reusing well-established techniques", + "bbox": [ + 495, + 847, + 885, + 893 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(e.g., Python-based solving for Game of 24), models can bypass repeated rediscovery of the same strategies. This significantly cuts down reasoning overhead and token usage in subsequent queries, though the initial cost of discovering a robust approach and curating it remains non-trivial.", + "bbox": [ + 84, + 84, + 473, + 161 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "DC performs better than majority voting (MV). To test if DC provides advantages over conventional MV at inference, we also tested Sonnet on AIME 2024 and 2025 using both approaches. MV, which selects the most common answer from three independent generations, yielded no improvements over single-shot inference. As seen in Table 4, on AIME 2024, MV performed identically to the baseline $(23.3\\%)$ , while on AIME 2025, it remained at $6.7\\%$ , offering no tangible gain. Even with DC- $\\emptyset$ , MV slightly underperformed $(33.3\\%$ vs. $36.7\\%)$ . In contrast, DC-Cu outperformed MV, reaching $50.0\\%$ on AIME 2024 and $36.7\\%$ on AIME 2025. Unlike MV, which passively aggregates outputs, DC actively refines knowledge over time, eliminating errors and improving solution quality. 
This confirms that memory-based adaptation is far more effective than simple statistical voting in complex reasoning tasks.", + "bbox": [ + 86, + 167, + 475, + 409 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/a96fc554773f447b0b92412be8f2e3f8819c76f4e33c639b283090006003112c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TasksClaude 3.5 Sonnet
BLMV(BL)DC-∅MV(DC-∅)DC-Cu.
AIME 202423.323.3336.733.350.0
AIME 20256.76.723.323.336.7
", + "bbox": [ + 89, + 422, + 473, + 496 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4: Comparison of majority voting (MV) with DC on AIME.", + "bbox": [ + 84, + 505, + 473, + 520 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Clustering of errors and corrections. Our experiments suggest that errors and their corrections often cluster in a latent embedding space. See Figure 10. Once a model acquires a high-quality heuristic for a cluster of related queries, it can apply this knowledge to tightly embedded neighbors. However, faulty heuristics that slip into memory can be equally amplified. Ensuring that the memory remains \"clean\" thus requires careful curation and, if necessary, pruning to avoid propagating erroneous strategies.", + "bbox": [ + 84, + 534, + 473, + 671 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Transferability of memory content across models. We also observed that larger models, such as Claude 3.5 Sonnet and GPT-4o, can sometimes produce higher-quality strategies that, in principle, could benefit smaller models if the memory is transferred. However, if a smaller model lacks the generative capacity to interpret or refine those strategies correctly, its performance can stall or degrade. In our ablation experiments, we observed mixed results. This indicates that memory entries, while helpful, cannot fully compensate for inadequate base capability.", + "bbox": [ + 84, + 676, + 473, + 829 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Long-context generation versus understanding. Most large LLMs excel at processing lengthy inputs but struggle to generate comparably long $^{12}$ and well-organized outputs.", + "bbox": [ + 84, + 835, + 473, + 881 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "DC's memory curation after each query can demand precise reproduction or modification of prior knowledge. 
We observed instances where the model merely references or abbreviates the existing memory (e.g., \"Previous content [...] preserved\") instead of explicitly rewriting it. Such truncated memory updates can reduce the quality of stored heuristics over time. Potential solutions include maintaining a structured, external database that the LM can reference without regenerating large swaths of text each time.", + "bbox": [ + 495, + 84, + 885, + 220 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Retrieval bottlenecks and noise. While retrieval-based variants (e.g., DC-RS) can substantially improve accuracy, poorly filtered retrieval mechanisms can introduce confusion, particularly when presented with highly diverse or loosely related queries. For example, in our experiments, GPT-4o's performance occasionally dipped in GPQA-Diamond due to suboptimal retrieval choices. This underscores the importance of robust retrieval methods (e.g., dense vector search, advanced ranking algorithms) that can reliably surface higher quality exemplars or heuristics while suppressing irrelevant or contradictory texts.", + "bbox": [ + 495, + 227, + 885, + 393 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Hierarchical and modular memory. As LLM deployments scale, specialized domains may benefit from subdividing or hierarchically organizing memory. For instance, a system could maintain separate curated memories for topics like combinatorics or physics, each updated by a specialized retrieval or curation mechanism. This may reduce the load on a unified memory store and help isolate errors within their respective domains, with the goal of further improving the clarity and reliability of retrieved heuristics.", + "bbox": [ + 495, + 401, + 885, + 537 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Time and token complexity. 
Although DC requires memory curation after each query, it optimizes efficiency over time by reducing redundant computation and token usage.[13] As the model retrieves and refines solutions, memory maintenance becomes a net gain rather than a cost. However, its sequential structure still poses challenges for large-scale parallel or batch tasks requiring independent inference.", + "bbox": [ + 495, + 545, + 885, + 650 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Smaller or more specialized models and R1 experiments. Finally, we note that smaller models, such as GPT-4o-mini, show limited gains under DC, as seen in Table 3. Additional experiments with \"R1\" models such as DeepSeek R1 and o1 similarly showed minimal or inconsistent improvements. In these cases, these models' generative ability appears too restricted to produce reliable strategies for storage or to interpret retrieved heuristics effectively. The solutions were far too verbose and long. Without sufficiently accurate and efficient base solutions, memory curation cannot yield substantial gains. 
This limitation ties back to the core premise that effective DC demands a capable foundation model to seed and refine the curated knowledge.", + "bbox": [ + 495, + 657, + 885, + 854 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "13On AIME 2024, Claude Sonnet averaged 370 tokens under BL, 494 under DC- $\\emptyset$ , 1035 under DC-RS, and 1831 under DC-Cu.", + "bbox": [ + 496, + 863, + 883, + 888 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "12See, e.g., (Liu et al., 2024b).", + "bbox": [ + 104, + 888, + 284, + 904 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Overall, DC offers a useful and practical framework for continuous, test-time learning in LLMs. Our findings emphasize the synergy between model capacity and memory curation, the importance of structural task similarity and retrieval precision, and the benefits of offloading repeated computations to flexible external stores (e.g., Python scripts). At the same time, alternative mechanisms (e.g., specialized sub-memories or adaptive example ordering) and more sophisticated retrieval techniques (e.g., topological clustering) remain promising directions for further research.", + "bbox": [ + 84, + 84, + 475, + 234 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 86, + 255, + 243, + 272 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We thank Batu El, Sabri Eyuboglu, Tayfun Gur, Emily Shen, Jake Silberg, Elana Simon, and Kyle Swanson for their helpful comments and suggestions. We also thank the members of the James Zou Lab at Stanford for their feedback in the early stages of this project. 
Suzgun gratefully acknowledges the support of an HAI-SAP Fellowship.", + "bbox": [ + 84, + 280, + 475, + 372 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 86, + 391, + 181, + 406 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Amari, S.-I. Natural gradient works efficiently in learning. Neural computation, 10(2):251-276, 1998.", + "Arcuschin, I., Janiak, J., Krzyzanowski, R., Rajamanoharan, S., Nanda, N., and Conmy, A. Chain-of-thought reasoning in the wild is not always faithful. In Workshop on Reasoning and Planning for Large Language Models, 2025. URL https://openreview.net/forum?id=L8094Whth0.", + "Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate, and critique through self-reflection. In The Twelfth International Conference on Learning Representations, 2023.", + "Bengio, Y., Louradour, J., Collobert, R., and Weston, J. Curriculum learning. In Proceedings of the 26th annual international conference on machine learning, pp. 41-48, 2009.", + "Besta, M., Blach, N., Kubicek, A., Gerstenberger, R., Podstawski, M., Gianinazzi, L., Gajda, J., Lehmann, T., Niewiadomski, H., Nczyk, P., et al. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 17682-17690, 2024.", + "Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., Van Den Driessche, G. B., Lespiau, J.-B., Damoc, B., Clark, A., et al. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, pp. 2206-2240. PMLR, 2022." + ], + "bbox": [ + 86, + 415, + 475, + 904 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Bottou, L. and Cun, Y. Large scale online learning. Advances in neural information processing systems, 16, 2003.", + "Bottou, L. 
and Le Cun, Y. On-line learning for very large data sets. Applied stochastic models in business and industry, 21(2):137-151, 2005.", + "Boudiaf, M., Mueller, R., Ben Ayed, I., and Bertinetto, L. Parameter-free online test-time adaptation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8344-8353, 2022.", + "Bulatov, A., Kuratov, Y., and Burtsev, M. Recurrent memory transformer. Advances in Neural Information Processing Systems, 35:11079-11091, 2022.", + "Chen, Z., Deng, Y., Yuan, H., Ji, K., and Gu, Q. Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024.", + "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "Feng, T., Han, P., Lin, G., Liu, G., and You, J. Thought-retriever: Don't just retrieve raw data, retrieve thoughts, 2024. URL https://openreview.net/forum?id=SkDNQbMQba.", + "Feng, Y., Li, F., Song, Z., Zheng, B., and Koehn, P. Learn to remember: Transformer with recurrent memory for document-level machine translation. arXiv preprint arXiv:2205.01546, 2022.", + "Golovneva, O., O'Brien, S., Pasunuru, R., Wang, T., Zettlemoyer, L., Fazel-Zarandi, M., and Celikyilmaz, A. Pathfinder: Guided search over multi-step reasoning paths. arXiv preprint arXiv:2312.05180, 2023.", + "Gou, Z., Shao, Z., Gong, Y., Shen, Y., Yang, Y., Duan, N., and Chen, W. Critic: Large language models can self-correct with tool-interactive critiquing. arXiv preprint arXiv:2305.11738, 2023.", + "Graves, A. Generating sequences with recurrent neural networks. arXiv preprint arXiv:1308.0850, 2013.", + "Graves, A., Wayne, G., and Danihelka, I. Neural Turing machines. arXiv preprint arXiv:1410.5401, 2014.", + "Gururangan, S., Marasovic, A., Swayamdipta, S., Lo, K., Beltagy, I., Downey, D., and Smith, N. A. 
Don't stop pretraining: Adapt language models to domains and tasks. arXiv preprint arXiv:2004.10964, 2020." + ], + "bbox": [ + 500, + 84, + 887, + 905 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 71 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Guu, K., Lee, K., Tung, Z., Pasupat, P., and Chang, M. Retrieval augmented language model pre-training. In International conference on machine learning, pp. 3929-3938. PMLR, 2020.", + "He, Z., Karlinsky, L., Kim, D., McAuley, J., Krotov, D., and Feris, R. Camelot: Towards large language models with training-free consolidated associative memory. arXiv preprint arXiv:2402.13449, 2024.", + "Joulin, A. and Mikolov, T. Inferring algorithmic patterns with stack-augmented recurrent nets. Advances in neural information processing systems, 28, 2015.", + "Karpicke, J. D. and Blunt, J. R. Retrieval practice produces more learning than elaborative studying with concept mapping. Science, 331(6018):772-775, 2011.", + "Karpicke, J. D. and Roediger III, H. L. The critical importance of retrieval for learning. science, 319(5865): 966-968, 2008.", + "Karpukhin, V., Oguz, B., Min, S., Lewis, P. S., Wu, L., Edunov, S., Chen, D., and Yih, W.-t. Dense passage retrieval for open-domain question answering. In EMNLP (1), pp. 6769-6781, 2020.", + "Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., and Lewis, M. Generalization through memorization: Nearest neighbor language models. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=Hk1BjCEKvH.", + "Kojima, T., Gu, S. S., Reid, M., Matsuo, Y., and Iwasawa, Y. Large language models are zero-shot reasoners. 
Advances in neural information processing systems, 35: 22199-22213, 2022.", + "Krause, B., Kahembwe, E., Murray, I., and Renals, S. Dynamic evaluation of transformer language models. arXiv preprint arXiv:1904.08378, 2019.", + "Lazaridou, A., Gribovskaya, E., Stokowiec, W. J., and Grigorev, N. Internet-augmented language models through few-shot prompting for open-domain question answering, 2023. URL https://openreview.net/forum?id=hFCUPkSSRE.", + "Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., Kuttler, H., Lewis, M., Yih, W.-t., Rocktaschel, T., et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in neural information processing systems, 33:9459-9474, 2020.", + "Liu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost in the middle: How language models use long contexts. Transactions of the Association for Computational Linguistics, 12:157-173, 2024a." + ], + "bbox": [ + 86, + 84, + 475, + 904 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Liu, X., Dong, P., Hu, X., and Chu, X. Longgenbench: Long-context generation benchmark. arXiv preprint arXiv:2410.04199, 2024b.", + "Liu, Y., Kothari, P., Van Delft, B., Bellot-Gurlet, B., Mordan, T., and Alahi, A. Ttt++: When does self-supervised test-time training fail or thrive? Advances in Neural Information Processing Systems, 34:21808-21820, 2021.", + "Long, J. Large language model guided tree-of-thought. arXiv preprint arXiv:2305.08291, 2023.", + "Lopez-Paz, D. and Ranzato, M. Gradient episodic memory for continual learning. Advances in neural information processing systems, 30, 2017.", + "Lu, P., Peng, B., Cheng, H., Galley, M., Chang, K.-W., Wu, Y. N., Zhu, S.-C., and Gao, J. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36: 43447-43478, 2023.", + "Madaan, A., Tandon, N., Clark, P., and Yang, Y. 
Memory-assisted prompt editing to improve gpt-3 after deployment. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 2833–2861, 2022.", + "Madaan, A., Tandon, N., Gupta, P., Hallinan, S., Gao, L., Wegreffe, S., Alon, U., Dziri, N., Prabhumoye, S., Yang, Y., et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023.", + "McCloskey, M. and Cohen, N. J. Catastrophic interference in connectionist networks: The sequential learning problem. In Psychology of learning and motivation, volume 24, pp. 109-165. Elsevier, 1989.", + "Mikolov, T., Karafiát, M., Burget, L., Cernocký, J., and Khudanpur, S. Recurrent neural network based language model. In *Interspeech*, volume 2, pp. 1045–1048. Makuhari, 2010.", + "Munkhdalai, T., Sordoni, A., Wang, T., and Trischler, A. Metalearned neural memory. Advances in Neural Information Processing Systems, 32, 2019.", + "Niu, S., Wu, J., Zhang, Y., Chen, Y., Zheng, S., Zhao, P., and Tan, M. Efficient test-time model adaptation without forgetting. In International conference on machine learning, pp. 16888-16905. PMLR, 2022.", + "Qin, Y., Liang, S., Ye, Y., Zhu, K., Yan, L., Lu, Y., Lin, Y., Cong, X., Tang, X., Qian, B., et al. Toollm: Facilitating large language models to master 16000+ real-world apis. arXiv preprint arXiv:2307.16789, 2023." + ], + "bbox": [ + 498, + 84, + 885, + 904 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 71 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 477, + 922, + 493, + 934 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Rannen-Triki, A., Bornschein, J., Pascanu, R., Hutter, M., György, A., Galashov, A., Teh, Y. W., and Titsias, M. K. 
Revisiting dynamic evaluation: Online adaptation for large language models. arXiv preprint arXiv:2403.01518, 2024.", + "Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J., and Bowman, S. R. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=Ti67584b98.", + "Roediger, H. L. and Butler, A. C. The critical role of retrieval practice in long-term retention. Trends in cognitive sciences, 15(1):20-27, 2011.", + "Schick, T., Dwivedi-Yu, J., Dessi, R., Raileanu, R., Lomeli, M., Hambro, E., Zettlemoyer, L., Cancedda, N., and Scialom, T. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36:68539-68551, 2023.", + "Shen, Y., Song, K., Tan, X., Li, D., Lu, W., and Zhuang, Y. HuggingGPT: Solving AI tasks with chatGPT and its friends in hugging face. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=yHdTscY6Ci.", + "Shi, F., Fried, D., Ghazvininejad, M., Zettlemoyer, L., and Wang, S. I. Natural language to code translation with execution. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 3533-3546, 2022.", + "Shi, F., Suzgun, M., Freitag, M., Wang, X., Srivats, S., Vosoughi, S., Chung, H. W., Tay, Y., Ruder, S., Zhou, D., Das, D., and Wei, J. Language models are multilingual chain-of-thought reasoners. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=fR3wGck-IXp.", + "Shi, W., Min, S., Yasunaga, M., Seo, M., James, R., Lewis, M., Zettlemoyer, L., and Yih, W.-t. REPLUG: Retrievalaugmented black-box language models. In Duh, K., Gomez, H., and Bethard, S. (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 
8371-8384, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.nacl-long.463. URL https://aclanthology.org/2024.nacl-long.463/.", + "Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K., and Yao, S. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36:8634-8652, 2023." + ], + "bbox": [ + 86, + 84, + 478, + 906 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Sun, Y., Wang, X., Liu, Z., Miller, J., Efros, A., and Hardt, M. Test-time training with self-supervision for generalization under distribution shifts. In International conference on machine learning, pp. 9229-9248. PMLR, 2020.", + "Sun, Y., Li, X., Dalal, K., Xu, J., Vikram, A., Zhang, G., Dubois, Y., Chen, X., Wang, X., Koyejo, S., et al. Learning to (learn at test time): Rnns with expressive hidden states. arXiv preprint arXiv:2407.04620, 2024.", + "Surís, D., Menon, S., and Vondrick, C. Vipergpt: Visual inference via python execution for reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 11888-11898, 2023.", + "Suzgun, M. and Kalai, A. T. Meta-prompting: Enhancing language models with task-agnostic scaffolding. arXiv preprint arXiv:2401.12954, 2024.", + "Suzgun, M., Gehrmann, S., Belinkov, Y., and Shieber, S. M. Memory-augmented recurrent neural networks can learn generalized dyck languages. arXiv preprint arXiv:1911.03329, 2019.", + "Suzgun, M., Melas-Kyriazi, L., and Jurafsky, D. Follow the wisdom of the crowd: Effective text generation via minimum bayes risk decoding. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 4265-4293, 2023a.", + "Suzgun, M., Scales, N., Scharli, N., Gehrmann, S., Tay, Y., Chung, H. W., Chowdhery, A., Le, Q., Chi, E., Zhou, D., et al. Challenging big-bench tasks and whether chain-of-thought can solve them. 
In Findings of the Association for Computational Linguistics: ACL 2023, pp. 13003-13051, 2023b.", + "Suzgun, M., Shieber, S. M., and Jurafsky, D. string2string: A modern python library for string-to-string algorithms. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pp. 278-285, 2024.", + "Syed, N. A., Liu, H., and Sung, K. K. Handling concept drifts in incremental learning with support vector machines. In Proceedings of the fifth ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 317-321, 1999.", + "Thrun, S. and Mitchell, T. M. Lifelong robot learning. Robotics and autonomous systems, 15(1-2):25-46, 1995.", + "Vu, T., Iyyer, M., Wang, X., Constant, N., Wei, J., Wei, J., Tar, C., Sung, Y.-H., Zhou, D., Le, Q., et al. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214, 2023." + ], + "bbox": [ + 500, + 84, + 887, + 906 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 71 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wang, D., Shelhamer, E., Liu, S., Olshausen, B., and Darrell, T. Tent: Fully test-time adaptation by entropy minimization. arXiv preprint arXiv:2006.10726, 2020.", + "Wang, X., Wei, J., Schuurmans, D., Le, Q. V., Chi, E. H., Narang, S., Chowdhery, A., and Zhou, D. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1PL1NIMMrw.", + "Wang, Y., Gao, Y., Chen, X., Jiang, H., Li, S., Yang, J., Yin, Q., Li, Z., Li, X., Yin, B., et al. Memoryllm: Towards self-updatable large language models. 
arXiv preprint arXiv:2402.04624, 2024a.", + "Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., Li, T., Ku, M., Wang, K., Zhuang, A., Fan, R., Yue, X., and Chen, W. MMLU-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024b. URL https://openreview.net/forum?id=y10DM6R2r3.", + "Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022.", + "Weston, J., Chopra, S., and Bordes, A. Memory networks. arXiv preprint arXiv:1410.3916, 2014.", + "Yang, L., Yu, Z., Zhang, T., Cao, S., Xu, M., Zhang, W., Gonzalez, J. E., and Cui, B. Buffer of thoughts: Thought-augmented reasoning with large language models. Advances in Neural Information Processing Systems, 37: 113519-113544, 2025.", + "Yao, S., Yu, D., Zhao, J., Shafran, I., Griffiths, T. L., Cao, Y., and Narasimhan, K. Tree of Thoughts: Deliberate problem solving with large language models, 2023.", + "Yuksekgonul, M., Bianchi, F., Boen, J., Liu, S., Lu, P., Huang, Z., Guestrin, C., and Zou, J. Optimizing generative ai by backpropagating language model feedback. Nature, 639:609-616, 2025.", + "Zelikman, E., Wu, Y., Mu, J., and Goodman, N. Star: Bootstrapping reasoning with reasoning. Advances in Neural Information Processing Systems, 35:15476-15488, 2022.", + "Zhang, K., Kang, Y., Zhao, F., and Liu, X. LLM-based medical assistant personalization with short- and long-term memory coordination. In Duh, K., Gomez, H., and Bethard, S. 
(eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for" + ], + "bbox": [ + 86, + 84, + 475, + 905 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 2386-2398, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.naac1-long.132. URL https://aclanthology.org/2024.naac1-long.132/.", + "Zhang, M., Levine, S., and Finn, C. Memo: Test time robustness via adaptation and augmentation. Advances in neural information processing systems, 35:38629-38642, 2022.", + "Zhang, T., Patil, S. G., Jain, N., Shen, S., Zaharia, M., Stoica, I., and Gonzalez, J. E. RAFT: Adapting language model to domain specific RAG. In First Conference on Language Modeling, 2024b. URL https://openreview.net/forum?id=rzQGHXNReU.", + "Zhong, Z., Lei, T., and Chen, D. Training language models with memory augmentation. In Goldberg, Y., Kozareva, Z., and Zhang, Y. (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 5657-5673, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.382. URL https://aclanthology.org/2022.emnlp-main.382/.", + "Zhou, D., Scharli, N., Hou, L., Wei, J., Scales, N., Wang, X., Schuurmans, D., Cui, C., Bousquet, O., Le, Q., et al. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625, 2022." + ], + "bbox": [ + 498, + 84, + 885, + 531 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 71 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 478, + 922, + 493, + 934 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A. 
Background & Related Work", + "text_level": 1, + "bbox": [ + 88, + 83, + 359, + 99 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1. Test-time learning (online learning)", + "text_level": 1, + "bbox": [ + 88, + 109, + 367, + 125 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Test-time learning—also referred to as online or incremental learning (adaptation)—encompasses a family of methods in which a stochastic model updates its predictions by incorporating information seen during inference, without undergoing conventional, full-scale offline finetuning. Early versions of test-time adaptation focused on local or transductive learning, where a model re-fit or re-weighted its parameters with each new test instance or batch (McCloskey & Cohen, 1989; Thrun & Mitchell, 1995; Amari, 1998; Syed et al., 1999; Bottou & Cun, 2003; Bottou & Le Cun, 2005, inter alia). In computer vision, for example, methods like test-time training have been shown to mitigate domain shifts by optimizing a self-supervised loss on incoming data (Wang et al., 2020; Sun et al., 2020; Liu et al., 2021; Boudiaf et al., 2022; Niu et al., 2022; Zhang et al., 2022; Sun et al., 2024). In the context of natural-language generation, test-time adaptation has appeared under terms such as \"dynamic evaluation\" (Mikolov et al., 2010; Graves, 2013; Krause et al., 2019; Rannen-Triki et al., 2024), in which a language model is updated with gradient steps on the test-time data itself.", + "bbox": [ + 88, + 133, + 473, + 448 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "However, directly updating language model weights at test time can be computationally expensive and requires the capacity to modify parameters. For large-scale, black-box APIs (e.g., GPT-3 or Claude), one often lacks the ability to perform parameter updates easily, thereby making such an approach difficult, if not completely infeasible (Shi et al., 2024). 
To address this, a growing body of work has explored parameter-free adaptation, whereby one structurally modifies immediate model inputs (e.g., prompting) or draws from external memory to \"update\" the model's effective reasoning. Our approach aligns with this direction by allowing an LM to iteratively record solutions, explanations, or heuristics in an external memory component over successive interactions, avoiding weight updates entirely.", + "bbox": [ + 88, + 458, + 473, + 667 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In the broader test-time learning literature, reflexive, compositional, and iterative refinement approaches like Reflexion (Shinn et al., 2023), Self-Refine (Madaan et al., 2023), (Self-)Critic (Gou et al., 2023), Chameleon (Lu et al., 2023), Meta-Prompting (Suzgun & Kalai, 2024), and Self-RAG (Asai et al., 2023) inter alia, use feedback loops or verification mechanisms to correct mistakes in solutions. TextGrad (Yuksekgonul et al., 2025) similarly draws on the notion of \"textual gradients\" as an alternative to parameter-based gradients and provides a pathway for improvement based on the content of mistakes. Our proposed DC framework differs by focusing explicitly on storing generalizable heuristics, solutions, or meta-level insights that can be repeatedly retrieved and applied across tasks, not just to correct a single solution. Furthermore, DC does not require a", + "bbox": [ + 88, + 676, + 473, + 902 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "new training loop for each batch or scenario; instead, the memory itself is updated to reflect newly found solutions, errors, or strategies without touching the model weights.", + "bbox": [ + 501, + 85, + 883, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2. 
Test-time compute and reasoning", + "text_level": 1, + "bbox": [ + 501, + 147, + 764, + 161 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "It is now widely known and accepted that contemporary LLMs such as GPT-4 can exhibit substantial improvements in reasoning and generation capability when additional compute is devoted to inference-time strategies (e.g., chain-of-thought prompting (Wei et al., 2022; Kojima et al., 2022; Zhou et al., 2022), tree-of-thought expansions (Yao et al., 2023; Long, 2023), minimum Bayes risk decoding (Suzgun et al., 2023a; Shi et al., 2022; Golovneva et al., 2023), majority-vote sampling (Wang et al., 2023)). Prompting methods such as Tree-of-Thought (Yao et al., 2023), Graph-of-Thought (Besta et al., 2024), and other non-linear compositional reasoning paradigms systematically enlarge the inference-time search space. They allow models to explore various reasoning paths and exploit consensus or iterative corrections to arrive at more accurate and reliable conclusions (Wei et al., 2022; Wang et al., 2023).", + "bbox": [ + 501, + 170, + 885, + 411 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "However, these expansions come at the cost of increased computational overhead per test instance (Yao et al., 2023). They are, however, typically ephemeral: once a solution is generated, subsequent tasks or input samples do not generally benefit from the heavy compute spent earlier, unless the user manually engineers advanced prompt-sharing or in-context demonstration strategies. Cf. (Zelikman et al., 2022). Our work, on the other hand, aims to reduce repeated overhead across multiple test instances of a similar domain by building a memory that persists from one query to the next. 
This memory not only reduces repetitive mistakes, but also consolidates and codifies robust solution strategies—effectively amortizing or \"sharing\" the cost of initial reflection across future tasks.[14]", + "bbox": [ + 501, + 420, + 885, + 628 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Another related thread involves tool usage or code execution (Schick et al., 2023; Lu et al., 2023; Shen et al., 2023; Qin et al., 2023; Surís et al., 2023; Suzgun & Kalai, 2024). These studies have explored how LLMs can call external Python interpreters, symbolic solvers, or other specialized", + "bbox": [ + 501, + 638, + 885, + 713 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "14Some lines of work—such as majority voting or sampling-based self-consistency—combine multiple inference passes for a single question but still lack a persistent knowledge base that spans different queries. DC differs in that we treat consecutive tasks in a sequence as a chance to refine a persistent, external store of learned lessons. The memory curation step selectively compiles relevant solutions, heuristics, expansions, or code blocks into a form that can be reused for upcoming queries. Thus, while the compute for the first few tasks may be higher, future tasks become simpler because the system can consult and adapt previously curated knowledge. 
This approach echoes the underlying motivation of test-time training—performing ongoing improvement at inference—but capitalizes on a cheap, external memory update in lieu of repeated or expensive parameter updates.", + "bbox": [ + 501, + 723, + 885, + 900 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 478, + 922, + 493, + 934 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "services and APIs to offload complex computations. Our empirical findings too illustrate that once an LLM under DC recognizes a systematic way (e.g., Python-based brute force algorithm) to handle a certain class of problems (like arithmetic puzzles), it can store that approach in memory and repeatedly retrieve it. Thus, DC not only invests extra compute in a single session but spreads that computational benefit across multiple interactions, effectively learning to use tools more consistently and reliably over time.", + "bbox": [ + 84, + 84, + 475, + 220 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.3. Memory-augmented generation and reasoning", + "text_level": 1, + "bbox": [ + 84, + 237, + 447, + 253 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Augmenting language models with external memory has seen renewed interest in recent years (Munkhdalai et al., 2019; Guu et al., 2020; Khandelwal et al., 2020; Bulatov et al., 2022; Borgeaud et al., 2022; Zhong et al., 2022; Feng et al., 2022; He et al., 2024; Wang et al., 2024a)—see also (Graves et al., 2014; Weston et al., 2014; Joulin & Mikolov, 2015; Suzgun et al., 2019) for early studies. Modern retrieval-augmented LLM approaches generally consult an external corpus of documents (i.e., a knowledge base) to improve factuality and reduce hallucination (Lewis et al., 2020; Lazaridou et al., 2023; Vu et al., 2023; Zhang et al., 2024b), but the retrieval corpus is almost always fixed prior to inference and does not evolve over time. 
These methods have been especially effective for open-domain question answering (Lewis et al., 2020; Guu et al., 2020; Karpukhin et al., 2020), where the model's own parameters may not hold all relevant knowledge. In practice, retrieval augmentation typically involves selecting and concatenating top- $k$ passages from a knowledge-base—while useful for factual queries, the approach, however, does not inherently solve iterative improvement or learning from mistakes in the sense of building upon prior solutions at inference time.", + "bbox": [ + 84, + 260, + 477, + 593 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Another line of research more closely aligns with our vision by storing not just reference knowledge but also the reasoning processes and solution strategies of language models. Several recent works have explored this direction. Thought-R retriever (Feng et al., 2024) logs the model's chain-of-thought from past queries and uses them for new, analogous queries. Buffer-of-Thoughts (BoT; Yang et al., 2025) takes a slightly different approach by distilling high-level \"thought templates\" from problem-solving processes, though it relies on predefined templates that seem to be tailored towards specific task types that were considered in their experiments. Madaan et al. (2022) have demonstrated that deployed models like GPT-3 can be improved through memory mechanisms that capture user feedback on errors, preventing similar mistakes in future interactions. Zhang et al. (2024a) have proposed a dual memory architecture combining long-term and short-term storage for medical applications, though their approach requires fine-tuning to incorporate new knowledge.", + "bbox": [ + 84, + 599, + 477, + 888 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "While these works reveal the many strategies for harnessing memory or feedback, DC emphasizes selectively storing the most relevant insights and heuristics. 
DC aims to avoid naive accumulation of full raw transcripts and ephemeral chain-of-thought expansions that can lead to memory bloat. Moreover, unlike methods that assume the model can be retrained or finetuned to incorporate memory items, we remain fully external and training-free; this aligns with \"plug-and-play\" usage principle, in which an off-the-shelf model is augmented by an external memory that it reads from and writes to, but does not require any gradient-based adaptation.", + "bbox": [ + 496, + 84, + 888, + 265 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B. Additional Figures and Tables", + "text_level": 1, + "bbox": [ + 84, + 83, + 366, + 99 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.1. Performance Comparison of Baseline and DC-RS Approaches", + "text_level": 1, + "bbox": [ + 84, + 109, + 552, + 125 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3753abf54911b452cbec3a721e8f488ca140cb869fbf92935bdc43d355e6fecd.jpg", + "image_caption": [ + "Figure 8: Overall performance of Claude 3.5 Sonnet under the baseline prompting approach with minimal instructions (Baseline) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS)." + ], + "image_footnote": [], + "bbox": [ + 269, + 140, + 702, + 404 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3bf7063caa0bbc6e0efa054f63a7152996963104001581e3d8f2f2908b61ebd9.jpg", + "image_caption": [ + "Figure 9: Overall performance of GPT-40 under the baseline prompting approach with minimal instructions (Baseline) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS)." 
+ ], + "image_footnote": [], + "bbox": [ + 269, + 450, + 702, + 715 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 694, + 70 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.2. Clustering of Errors and Corrections", + "text_level": 1, + "bbox": [ + 84, + 84, + 382, + 99 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/cbe8ce8c8a02f346ead1b1918f9cf394af0d8137154ff8de1bebe61ee04b97ea.jpg", + "image_caption": [ + "tSNE Visualization of the Question Embeddings in GPQA Diamond", + "Figure 10: t-SNE visualization of the embeddings of the raw questions in GPQA-Diamond. Note that correct and incorrect answers often cluster in latent embedding space. DC can help transfer learned strategies within these clusters, but without careful curation, erroneous heuristics may also spread, thus requiring careful memory refinement and verification of solution strategies." + ], + "image_footnote": [], + "bbox": [ + 101, + 137, + 870, + 510 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.3. 
Evolution of Memory Content under Dynamic Cheatsheet", + "text_level": 1, + "bbox": [ + 84, + 84, + 524, + 99 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/17cc3869468072f22e50c7c41c39565cf80065a240b2f2a75e0814101afcba71.jpg", + "image_caption": [ + "Figure 11: This figure illustrates how memory content of GPT-4o evolves over time in Game of 24, quantified using a longest-common-subsequence (LCS)-similarity metric (Suzgun et al., 2024) between consecutive states (measured at the word level). While both DC-Cu and DC-RS show high stability after the first few iterations, DC-Cu experiences slightly greater fluctuations in the second half of inference." + ], + "image_footnote": [], + "bbox": [ + 99, + 119, + 870, + 470 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B.4. Solution Generator and Memory Curator Prompts", + "text_level": 1, + "bbox": [ + 84, + 85, + 475, + 99 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B.4.1. Prompt Used by the Generator Model in Baseline", + "text_level": 1, + "bbox": [ + 86, + 108, + 477, + 125 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/3b13b9e6e372b158774aa1a3b38e8955de1127886d5e63e67d9c3ef8f1a54762.jpg", + "image_caption": [ + "Figure 12: Prompt used in the baseline (BL) approach, where the model receives minimal instructions. The prompt simply asks the model to answer the given question without any structured guidance, additional reasoning steps, or tool-use encouragement. This setup represents a traditional one-off inference method, reflecting how LLMs typically operate by default." 
+ ], + "image_footnote": [], + "bbox": [ + 151, + 138, + 826, + 513 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B.4.2. Prompt Used by the Generator Model in DR, FH, and DC Approaches", + "text_level": 1, + "bbox": [ + 84, + 85, + 622, + 99 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "GENERATOR (PROBLEM SOLVER)", + "text_level": 1, + "bbox": [ + 259, + 122, + 418, + 132 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Instruction: You are an expert problem-solving assistant tasked with analyzing and solving various questions using", + "bbox": [ + 259, + 135, + 707, + 145 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "a combination of your expertise and provided reference materials. Each task will include:", + "1. A specific question or problem to solve", + "2. A cheatsheet containing relevant strategies, patterns, and examples from similar problems" + ], + "bbox": [ + 259, + 145, + 624, + 172 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "##1.ANALYSIS&STRATEGY", + "text_level": 1, + "bbox": [ + 261, + 186, + 379, + 196 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Carefully analyze both the question and cheatsheet before starting", + "- Search for and identify any applicable patterns, strategies, or examples within the cheatsheet", + "- Create a structured approach to solving the problem at hand", + "- Review and document any limitations in the provided reference materials" + ], + "bbox": [ + 261, + 200, + 630, + 237 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "## 2. 
SOLUTION DEVELOPMENT", + "text_level": 1, + "bbox": [ + 261, + 252, + 393, + 261 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Present your solution using clear, logical steps that others can follow and review", + "- Explain your reasoning and methodology before presenting final conclusions", + "- Provide detailed explanations for each step of the process", + "- Check and verify all assumptions and intermediate calculations" + ], + "bbox": [ + 261, + 263, + 576, + 301 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "##3.PROGRAMMINGTASKS", + "text_level": 1, + "bbox": [ + 261, + 316, + 380, + 325 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "When coding is required:", + "bbox": [ + 261, + 328, + 361, + 337 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "- Write clean, efficient Python code", + "bbox": [ + 261, + 338, + 401, + 347 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "- Follow the strict code formatting and execution protocol (always use the Python code formatting block;", + "bbox": [ + 261, + 347, + 668, + 356 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "furthermore, after the code block, always explicitly request execution by appending: \"EXECUTE CODE!\":", + "bbox": [ + 261, + 356, + 668, + 364 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "```\n``python", + "bbox": [ + 261, + 366, + 307, + 375 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Your code here", + "bbox": [ + 261, + 375, + 331, + 383 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "EXECUTE CODE!", + "text_level": 1, + "bbox": [ + 261, + 393, + 334, + 402 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- All required imports and dependencies should be clearly declared at the top of your code", + "- Include clear inline comments to explain any complex programming logic", + "- Perform result validation 
after executing your code", + "- Apply optimization techniques from the cheatsheet when applicable", + "- The code should be completely self-contained without external file dependencies—it should be ready to be", + "executed right away", + "- Do not include any placeholders, system-specific paths, or hard-coded local paths", + "- Feel free to use standard and widely-used pip packages", + "- Opt for alternative methods if errors persist during execution", + "- Exclude local paths and engine-specific settings (e.g., avoid configurations like", + "chess.engineSimpleEngine.popen_uci(\"/usr/bin/stockfish\")" + ], + "bbox": [ + 261, + 407, + 684, + 510 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "## 4. FINAL ANSWER FORMAT", + "text_level": 1, + "bbox": [ + 261, + 525, + 387, + 532 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "ALWAYS present your final answer in the following format:", + "bbox": [ + 261, + 537, + 491, + 546 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "FINAL ANSWER:", + "text_level": 1, + "bbox": [ + 261, + 547, + 331, + 556 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "", + "bbox": [ + 261, + 559, + 302, + 566 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "(final answer)", + "bbox": [ + 261, + 568, + 320, + 575 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "", + "bbox": [ + 261, + 577, + 305, + 585 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "N.B. 
Make sure that the final answer is properly wrapped inside the block.", + "bbox": [ + 261, + 587, + 589, + 597 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* For multiple-choice questions: Only provide the letter choice (e.g., (A))", + "* For numerical answers: Only provide the final number (e.g., 42)", + "* For other types of answers, including free-response answers: Provide the complete final answer" + ], + "bbox": [ + 261, + 599, + 638, + 628 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Example:", + "bbox": [ + 261, + 633, + 302, + 642 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Q: What is the meaning of life?", + "bbox": [ + 261, + 642, + 383, + 651 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "A: [..]", + "bbox": [ + 261, + 651, + 287, + 659 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "FINAL ANSWER:", + "text_level": 1, + "bbox": [ + 261, + 660, + 331, + 667 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "", + "bbox": [ + 261, + 671, + 302, + 678 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "42", + "bbox": [ + 261, + 680, + 274, + 686 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "", + "bbox": [ + 261, + 688, + 305, + 696 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "CHEATSHEET:", + "text_level": 1, + "bbox": [ + 261, + 712, + 321, + 720 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "[CHEATSHEET]", + "text_level": 1, + "bbox": [ + 261, + 729, + 331, + 739 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "\"", + "bbox": [ + 261, + 741, + 302, + 748 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Now it is time to solve the following question.", + "bbox": [ + 261, + 770, + 441, + 780 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "CURRENTINPUT:", + "bbox": [ + 261, + 782, + 336, + 791 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "[QUESTION]", + 
"text_level": 1, + "bbox": [ + 261, + 801, + 321, + 811 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Figure 13: Generator prompt used in the DR, FH, and DC approaches, where the model receives structured high-level instructions on solution development, strategy selection, and tool usage. This prompt explicitly encourages Python code generation and execution for computational tasks. Notably, this same structured prompt is used in all non-BL methods, including DC-Ø, DR, FH, DC-Cu, and DC-RS. We also remark that during the initial phases of our experiments, we used \"cheatsheet\" and \"memory\" interchangeably to describe stored problem-solving content. However, to maintain consistency, we formally define $M_{i}$ as memory throughout this paper. Since this was purely a semantic choice, we did not find it necessary to rerun our experiments to reflect this terminology shift.", + "bbox": [ + 84, + 833, + 887, + 910 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 475, + 922, + 496, + 934 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "B.4.3. Prompt Used by the Memory Curation Model under DC-RS", + "text_level": 1, + "bbox": [ + 84, + 84, + 552, + 99 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "CHEATSHEET CURATOR", + "text_level": 1, + "bbox": [ + 236, + 122, + 346, + 131 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Purpose and Goals", + "text_level": 1, + "bbox": [ + 236, + 133, + 326, + 143 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "You are responsible for maintaining, refining, and optimizing the Dynamic Cheatsheet, which serves as a compact yet evolving repository of problem-solving strategies, reusable code snippets, and meta-reasoning techniques. 
Your goal is to enhance the model's long-term performance by continuously updating the cheatsheet with high-value insights while filtering out redundant or trivial information.", + "bbox": [ + 236, + 143, + 725, + 169 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- The cheatsheet should include quick, accurate, reliable, and practical solutions to a range of technical and creative challenges.", + "bbox": [ + 236, + 170, + 687, + 179 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- After seeing each input, you should improve the content of the cheatsheet, synthesizing lessons, insights, tricks, and errors learned from past problems and adapting to new challenges.", + "bbox": [ + 236, + 179, + 725, + 196 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Core Responsibilities", + "text_level": 1, + "bbox": [ + 236, + 207, + 334, + 215 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Selective Knowledge Retention:", + "text_level": 1, + "bbox": [ + 236, + 218, + 357, + 227 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Preserve only high-value strategies, code blocks, insights, and reusable patterns that significantly contribute to problem-solving.", + "- Discard redundant, trivial, or highly problem-specific details that do not generalize well.", + "- Ensure that previously effective solutions remain accessible while incorporating new, superior methods." 
+ ], + "bbox": [ + 236, + 227, + 697, + 253 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Continuous Refinement & Optimization:", + "text_level": 1, + "bbox": [ + 236, + 255, + 388, + 263 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Improve existing strategies by incorporating more efficient, elegant, or generalizable techniques.", + "- Remove duplicate entries or rephrase unclear explanations for better readability.", + "- Introduce new meta-strategies based on recent problem-solving experiences." + ], + "bbox": [ + 236, + 263, + 586, + 289 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Structure & Organization:", + "text_level": 1, + "bbox": [ + 236, + 292, + 336, + 300 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Maintain a well-organized cheatsheet with clearly defined sections:", + "- Reusable Code Snippets and Solution Strategies", + "- General Problem-Solving Heuristics", + "- Optimization Techniques & Edge Cases", + "-Specialized Knowledge & Theorems" + ], + "bbox": [ + 236, + 300, + 483, + 342 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Use tagging (e.g., Q14, Q22) to reference previous problems that contributed to a given strategy.", + "bbox": [ + 236, + 342, + 583, + 351 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Principles and Best Practices", + "text_level": 1, + "bbox": [ + 236, + 364, + 362, + 373 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "For every new problem encountered:", + "1. Evaluate the Solution's Effectiveness", + "- Was the applied strategy optimal?", + "- Could the solution be improved, generalized, or made more efficient?", + "- Does the cheatsheet already contain a similar strategy, or should a new one be added?" + ], + "bbox": [ + 236, + 375, + 553, + 417 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "2. 
Curate & Document the Most Valuable Insights", + "text_level": 1, + "bbox": [ + 236, + 420, + 421, + 429 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Extract key algorithms, heuristics, and reusable code snippets that would help solve similar problems in the future.", + "- Identify patterns, edge cases, and problem-specific insights worth retaining.", + "- If a better approach than a previously recorded one is found, replace the old version." + ], + "bbox": [ + 236, + 429, + 653, + 454 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "3. Maintain Concise, Actionable Entries", + "text_level": 1, + "bbox": [ + 236, + 457, + 383, + 465 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Keep explanations clear, actionable, concise, and to the point.", + "- Include only the most effective and widely applicable methods.", + "- Seek to extract useful and general solution strategies and/or Python code snippets." + ], + "bbox": [ + 236, + 465, + 540, + 489 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "4. Implement a Usage Counter", + "text_level": 1, + "bbox": [ + 236, + 493, + 352, + 501 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Each entry must include a usage count: Increase the count every time a strategy is successfully used in problem-solving.", + "- Use the count to prioritize frequently used solutions over rarely applied ones." 
+ ], + "bbox": [ + 236, + 502, + 669, + 518 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Memory Update Format", + "text_level": 1, + "bbox": [ + 236, + 532, + 346, + 540 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Use the following structure for each memory item:", + "bbox": [ + 236, + 541, + 419, + 549 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "··", + "bbox": [ + 236, + 550, + 251, + 556 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 561, + 302, + 569 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 570, + 292, + 577 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "[Briefly describe the problem context, purpose, and key aspects of the solution.] (Reference: Q1, Q2, Q6, etc.)", + "bbox": [ + 236, + 577, + 620, + 585 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 585, + 294, + 594 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 595, + 282, + 603 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "[Provide a well-documented code snippet, worked-out solution, or efficient strategy.]", + "bbox": [ + 236, + 603, + 540, + 612 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 612, + 284, + 619 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 619, + 305, + 628 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "** Count: [Number of times this strategy has been used to solve a problem.]", + "bbox": [ + 236, + 628, + 508, + 637 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 638, + 302, + 647 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "[...]", + "bbox": [ + 236, + 648, + 251, + 655 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 656, + 305, + 664 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": 
"** Count: [...]", + "bbox": [ + 236, + 664, + 287, + 672 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "[...]", + "bbox": [ + 236, + 675, + 253, + 684 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 686, + 302, + 695 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "[...]", + "bbox": [ + 236, + 696, + 251, + 703 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 704, + 305, + 712 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Prioritize accuracy, efficiency & generalizability: The cheatsheet should capture insights that apply across multiple problems rather than just storing isolated solutions.", + "bbox": [ + 236, + 723, + 732, + 739 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Ensure clarity & usability: Every update should make the cheatsheet more structured, actionable, and easy to navigate.", + "bbox": [ + 236, + 739, + 665, + 750 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Maintain a balance: While adding new strategies, ensure that old but effective techniques are not lost.", + "bbox": [ + 236, + 750, + 607, + 757 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Keep it evolving: The cheatsheet should be a living document that continuously improves over time, enhancing test-time meta-learning capabilities.", + "bbox": [ + 236, + 757, + 725, + 773 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "N.B. Keep in mind that once the cheatsheet is updated, any previous content not directly included will be lost and cannot be retrieved.", + "bbox": [ + 236, + 776, + 712, + 785 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Therefore, make sure to explicitly copy any (or all) relevant information from the previous cheatsheet to the new cheatsheet! 
Furthermore,", + "bbox": [ + 236, + 785, + 728, + 792 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "make sure that all information related to the cheatsheet is wrapped inside the block.", + "bbox": [ + 236, + 794, + 589, + 801 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Figure 14: Prompt used for the memory curator under DC-RS, which is responsible for maintaining an evolving repository of problem-solving strategies, code snippets, and heuristics. The curator selectively retains high-value insights, refines existing strategies, and organizes memory efficiently. This ensures the memory (cheatsheet) remains concise, generalizable, and action-oriented, continuously improving test-time reasoning. (Once again, we note that during the initial phases of our experiments, we used \"cheatsheet\" and \"memory\" interchangeably to describe stored problem-solving content. However, to maintain consistency, we formally define $M_{i}$ as memory throughout this paper. Since this was purely a semantic choice, we did not find it necessary to rerun our experiments to reflect this terminology shift.)", + "bbox": [ + 84, + 816, + 888, + 906 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 475, + 922, + 493, + 935 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/c18c5a4c320e8bf795d22cf17be40b64fa6fba5af8154a28995e9bc5885a3668.jpg", + "image_caption": [ + "Figure 15: The rest of the prompt used by the memory curator under DC-RS (Figure 14)." 
+ ], + "image_footnote": [], + "bbox": [ + 233, + 85, + 746, + 448 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory", + "bbox": [ + 277, + 56, + 692, + 70 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 21 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_model.json b/data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f9f8f21268b55f1f1f14f99e57c0f352c389440a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_model.json @@ -0,0 +1,5381 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.264, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.07952v1 [cs.LG] 10 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.155, + 0.124, + 0.818, + 0.145 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.189, + 0.793, + 0.207 + ], + "angle": 0, + "content": "Mirac Suzgun1 Mert Yuksekgonul1 Federico Bianchi2 Dan Jurafsky1 James Zou1,2" + }, + { + "type": "title", + "bbox": [ + 0.447, + 0.233, + 0.526, + 0.248 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.253, + 0.856, + 0.54 + ], + "angle": 0, + "content": "Despite their impressive performance on complex tasks, current language models (LMs) typically operate in a vacuum: Each input query is processed separately, without retaining insights from previous attempts. Here, we present Dynamic Cheatsheet (DC), a lightweight framework that endows a black-box LM with a persistent, evolving memory. 
Rather than repeatedly re-discovering or re-committing the same solutions and mistakes, DC enables models to store and reuse accumulated strategies, code snippets, and general problem-solving insights at inference time. This test-time learning enhances performance substantially across a range of tasks without needing explicit ground-truth labels or human feedback. Leveraging DC, Claude 3.5 Sonnet's accuracy more than doubled on AIME math exams once it began retaining algebraic insights across questions. Similarly, GPT-4o's success rate on the Game of 24 puzzle increased from about \\(10\\%\\) to \\(99\\%\\) after the model discovered and reused a Python-based solution. In tasks prone to arithmetic mistakes, such as balancing equations, DC enabled GPT-4o and Claude to reach near-perfect accuracy by recalling previously validated code, whereas their baselines stagnated around \\(50\\%\\). Beyond arithmetic challenges, DC yields notable accuracy gains on knowledge-demanding tasks. Claude achieved a \\(9\\%\\) improvement in GPQA-Diamond and an \\(8\\%\\) boost on MMLU-Pro Engineering and Physics problems. Crucially, DC's memory is self-curated, focusing on concise, transferable snippets rather than entire transcripts, thereby facilitating meta-learning and avoiding context ballooning. Unlike fine-tuning or static retrieval methods, DC adapts LMs' problem-solving skills on the fly, without modifying their underlying parameters, and offers a practical approach for continuously refining responses and cutting routine errors. 
Overall, our findings present DC as a promising approach for augmenting LMs with persistent memory, bridging the divide between isolated inference events and the cumulative, experience-driven learning characteristic of human cognition.*" + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.552, + 0.48, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.552, + 0.619, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.552, + 0.86, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.754, + 0.888, + 0.858 + ], + "angle": 0, + "content": "Figure 1: Comparison of different baselines and Dynamic Cheatsheet (DC) variants on challenging reasoning benchmarks, including AIME exams and GPQA-Diamond. Baseline represents a standard prompting approach with minimal guidance, while DC- \\(\\varnothing\\) (a stronger baseline) contains explicit structured instructions for problem solving, as well as for Python code generation and execution, but lacks a memory component. Our proposed DC-Cu and DC-RS variants incorporate an evolving, text-based memory to enhance inference-time learning. Results (accuracy, %) demonstrate substantial improvements, with Claude 3.5 Sonnet gaining \\(27\\%\\) on AIME 2024 and \\(30\\%\\) on AIME 2025 under DC-Cu. In Game of 24, GPT-4o leaps from \\(10\\%\\) (baseline) to \\(99\\%\\) under DC-RS, reflecting its ability to retain and apply Python-based solutions efficiently. Similarly, Claude 3.5 Sonnet's accuracy more than doubles in Math Equation Solver, reaching \\(98\\%\\). Overall, these findings highlight the impact of test-time learning through controlled memory augmentation and efficient retrieval." + }, + { + "type": "page_footnote", + "bbox": [ + 0.108, + 0.877, + 0.775, + 0.905 + ], + "angle": 0, + "content": "\\(^{1}\\)Stanford University \\(^{2}\\)Together AI. 
\\(\\boxtimes\\) Correspondence to: msuzgun@stanford.edu and jamesz@stanford.edu. \n*We release all our data, results, and code at http://github.com/suzgunmirac/dynamic-cheatsheet." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.084, + 0.218, + 0.1 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.11, + 0.477, + 0.276 + ], + "angle": 0, + "content": "Modern large language models (LLMs) can tackle complex reasoning tasks, answer various questions, and generate extensive texts. Yet they still suffer from one critical limitation: once deployed, these models are fixed prior to deployment and typically retain no explicit or implicit memory of past questions, successes, or mistakes during inference. They approach each new problem de novo, often re-deriving the same insights—and re-committing the same errors. In contrast, human cognition stands on a foundation of incremental learning, continuously internalizing new experiences and solutions into a persistent mental model." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.284, + 0.476, + 0.526 + ], + "angle": 0, + "content": "In this work, we present Dynamic Cheatsheet (DC), a simple and intuitive framework that endows black-box LLMs with a persistent, evolving memory at inference time. 
Rather than fine-tuning weights (for instance, through dynamic evaluation (Krause et al., 2019) or domain adaptation (Gururangan et al., 2020)) or retrieving facts from a massive static corpus (as in traditional retrieval-augmented generation systems (Guu et al., 2020; Zhang et al., 2024b)), DC dynamically curates a compact library of reusable strategies, solution sketches, and code snippets. Either before or after each query, DC enables the system to decide which lessons to store, what to discard, and how to refine existing entries—thus effectively \"learning\" from successes and failures. It is a flexible online-learning approach that enables a black-box LLM to improve itself without needing any explicit ground truth labels or human feedback." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.533, + 0.476, + 0.744 + ], + "angle": 0, + "content": "The overall workflow of DC is intuitive and compelling. In one version of DC (DC-Cu.), when presented with a new query, the LM first consults its external memory to see if any prior insights, strategies or relevant model solutions have been stored. It then proposes a solution by combining the retrieved insights with its own internal reasoning capabilities. Upon generating an answer, it then proceeds to a curation phase that updates the memory: If the approach seems to be correct, useful, or practical, DC codifies it in its memory for future use; if an error surfaces, DC may revise or prune faulty heuristics. This all happens without gradient-based parameter updates, so computational overhead remains modest, and compatibility with black-box APIs (e.g., GPT-4 or Claude) is fully preserved. See Figure 4." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.751, + 0.476, + 0.903 + ], + "angle": 0, + "content": "We tested DC across multiple challenging benchmarks and observed that it increases performance and reduces repetitive mistakes. 
On AIME 2024, Claude 3.5 Sonnet jumped from \\(23\\%\\) to \\(50\\%\\) accuracy, more than doubling its baseline score, by retaining algebraic and combinatorial insights. Likewise, it gained \\(30\\%\\) accuracy on AIME 2025. Notably, these improvements hold in knowledge-intensive tasks as well. On GPQA-Diamond, which tests specialized domain questions, DC lifted Claude by over \\(9\\%\\). In MMLU-Pro Engineering and Physics, it provided up to an \\(8\\%\\) boost in" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.085, + 0.88, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.324, + 0.888, + 0.364 + ], + "angle": 0, + "content": "Figure 2: Overall task performance of Claude 3.5 Sonnet under the baseline prompting approach with minimal instructions (BL) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS)." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.385, + 0.889, + 0.417 + ], + "angle": 0, + "content": "performance by allowing the model to maintain a \" toolkit\" of formulas and general problem-solving patterns." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.423, + 0.888, + 0.605 + ], + "angle": 0, + "content": "An even more striking and compelling example is the Game of 24, a puzzle that requires the solver to combine four digits into an arithmetic expression equaling 24. GPT-4o's baseline performance (10%) increased to 99% under DC. Early in the test sequence, the model discovered that an efficient Python brute-force solver eliminated all manual guesswork. Once this snippet was stored, GPT-4o simply retrieved it for subsequent queries, avoiding manual arithmetic entirely. We saw a similar pattern in Math Equation Balancer, where GPT-4o and Claude soared from 45-50% to 98-100% by \"recalling\" a straightforward code-based approach instead of manually fumbling with numeric manipulations." 
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.612, + 0.888, + 0.734 + ], + "angle": 0, + "content": "Nonetheless, DC is not a panacea. We found that smaller models, such as GPT-4o-mini, benefit from DC in limited amounts. These models generate too few correct solutions in these challenging tasks in the first place, leaving the memory populated with flawed or incomplete strategies. Worse, they struggle to refine stored content. DC can amplify the strengths of models that can already produce high-quality outputs, but not fix foundational gaps in reasoning." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.74, + 0.889, + 0.906 + ], + "angle": 0, + "content": "We also note that DC differs from naive \"append the entire conversation history\" in-context learning approaches. Under DC, memory is carefully curated, focusing on succinct, useful, and transferable knowledge over raw transcripts. This prevents ballooning context lengths (Liu et al., 2024a) and helps ensure that repeated retrieval remains tractable. Indeed, part of DC's contribution is in formalizing a mechanism for selective, evolving retention—storing just enough to solve the next set of tasks without drowning in an ever-growing text buffer. Cf. 
(Karpicke & Roediger III, 2008; Roediger & Butler, 2011; Karpicke & Blunt, 2011)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.493, + 0.935 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.695, + 0.072 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.082, + 0.353, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.361, + 0.082, + 0.617, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.082, + 0.886, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.227, + 0.887, + 0.281 + ], + "angle": 0, + "content": "Figure 3: Algorithmic illustration of the Dynamic Cheatsheet (DC)-based approaches and other baseline methods. Here, Gen represents the solution generator model, Cur the memory curator, and Retr the retriever. While we use the same black-box LLMs for both generation and curation, we differentiate their roles via task-agnostic instructions (prompts). The retrieval mechanism ranks historical inputs based on cosine similarity with the current query, selecting the most relevant past examples along with their generated solutions." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.287, + 0.445, + 0.305 + ], + "angle": 0, + "content": "2. Dynamic Cheatsheet (DC) Methodology" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.313, + 0.477, + 0.448 + ], + "angle": 0, + "content": "DC, in its core, includes an external, non-parametric memory that evolves in tandem with the LLM's inference process. Rather than fine-tuning the underlying weights, DC tracks successes and failures of the model at test time, then selectively stores heuristics, strategies, or short textual artifacts that can guide the LLM in future instances. 
Notably, this approach respects the black-box nature of many commercial LLM APIs: no gradient-based updates are required, and the model's core parameters remain untouched." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.465, + 0.398, + 0.481 + ], + "angle": 0, + "content": "2.1. DC: Building Blocks and Iterative Loop" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.489, + 0.477, + 0.534 + ], + "angle": 0, + "content": "The DC framework consists of two core modules: generation and curation. Both modules can easily operate on top of the same LM (prompted differently) or on separate LMs." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.549, + 0.371, + 0.564 + ], + "angle": 0, + "content": "2.1.1. Solution Generation with Memory" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.573, + 0.475, + 0.693 + ], + "angle": 0, + "content": "Let's consider a sequence of inputs \\((x_{1},x_{2},\\ldots ,x_{n})\\) , where each \\(x_{i}\\sim \\mathcal{D}_{\\mathrm{test}}\\) indicates a new query or problem posed to the model sampled from the same distribution \\(\\mathcal{D}_{\\mathrm{test}}\\) (a typical setting in online learning). The distribution \\(\\mathcal{D}_{\\mathrm{test}}\\) is unknown to us. At the \\(i\\) -th step, the model is provided with both the new query \\(x_{i}\\) and the current memory state \\(M_{i}\\) which captures knowledge gleaned from previous successes and failures. We denote the solution generator by Gen:" + }, + { + "type": "equation", + "bbox": [ + 0.219, + 0.701, + 0.475, + 0.717 + ], + "angle": 0, + "content": "\\[\n\\tilde {y} _ {i} = \\operatorname {G e n} \\left(x _ {i}, M _ {i}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.724, + 0.476, + 0.77 + ], + "angle": 0, + "content": "Here, \\(\\tilde{y}_i\\) is the candidate solution produced by the model. \\(M_{i}\\) helps condition the model to reuse or adapt previously stored solutions, insights, techniques, or heuristics." 
+ }, + { + "type": "title", + "bbox": [ + 0.086, + 0.784, + 0.295, + 0.8 + ], + "angle": 0, + "content": "2.1.2. Memory Curation Step" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.808, + 0.476, + 0.838 + ], + "angle": 0, + "content": "After the generator produces its answer \\(\\tilde{y}_i\\) to \\(x_i\\), the curator, Cur, updates the current content of the memory:" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.845, + 0.475, + 0.862 + ], + "angle": 0, + "content": "\\[\nM _ {i + 1} = \\operatorname {C u r} \\left(M _ {i}, x _ {i}, \\tilde {y} _ {i}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.876, + 0.476, + 0.906 + ], + "angle": 0, + "content": "During memory curation, Cur mainly considers: (i) the usefulness and generalizability of the newly produced answer" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.288, + 0.889, + 0.41 + ], + "angle": 0, + "content": "(i.e., if \\(\\tilde{y}_i\\) is correct or provides valuable and generalizable insights, it is distilled into a form suitable for later reference), (ii) refinement or removal of existing memory entries (i.e., if an existing memory entry was incorrect or superseded by a more efficient or versatile strategy, Cur may remove or update it), and (iii) clarity and compactness of the entire memory (i.e., memory entries are consolidated to retain succinct, high-impact references and heuristics)." + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.422, + 0.88, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.555, + 0.88, + 0.569 + ], + "angle": 0, + "content": "Figure 4: Illustration of Dynamic Cheatsheet (DC-Cu variant)." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.572, + 0.888, + 0.709 + ], + "angle": 0, + "content": "Cur does not have access to ground-truth labels; so, it has to assess the correctness and efficiency of the solutions by itself before updating the memory. 
In our experiments, we instruct a single model to perform this crucial step. Yet, in practice, Cur can be implemented as a series of steps that instruct multiple tools and models, through different prompts, to verify the validity and efficiency of the solution and to transform the raw solution text into even more generalizable, reliable, and efficient strategies, insights, and code snippets." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.715, + 0.887, + 0.821 + ], + "angle": 0, + "content": "We refer to this version of DC above as DC-Cu (short for DC-Cumulative). Under DC-Cu, the system first performs solution generation based on the current memory (Eqn. 1) and then updates the memory (Eqn. 2), by cumulatively expanding and refining the memory items thus far. Unlike DC-RS, which is discussed in the next part, DC-Cu, does not contain a retrieval component, however." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.837, + 0.809, + 0.853 + ], + "angle": 0, + "content": "2.2. DC with Retrieval & Synthesis (DC-RS)" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.861, + 0.886, + 0.906 + ], + "angle": 0, + "content": "DC-Cu has two potential drawbacks. First, it updates the memory after processing an input query, rather than refining it before generating a response. This means the model lacks" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.057, + 0.695, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.477, + 0.192 + ], + "angle": 0, + "content": "the opportunity to incorporate new insights from the current query while reasoning through its solution. Second, DC-Cu does not store or revisit past input-output pairs unless explicitly retained in memory. 
This omission prevents the model from directly retrieving and leveraging historical responses, which can be particularly valuable in benchmarks covering diverse topics or domains (e.g., GPQA-Diamond)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.199, + 0.475, + 0.304 + ], + "angle": 0, + "content": "To address these issues, DC-RS modifies the sequence of memory updates and introduces a retrieval mechanism, Retr, into the curation process. Retr allows the model to retrieve the most relevant past input-output pairs from its knowledge base. By refining the memory before responding and retrieving prior cases when needed, DC-RS enhances the model's adaptability and reasoning efficiency." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.311, + 0.476, + 0.42 + ], + "angle": 0, + "content": "DC-RS first retrieves\\(^{1}\\) top-\\(k\\) most similar inputs, along with their model-generated outputs, from previously seen examples, which we denote by \\(R_{i}^{(k)}\\) (or simply \\(R_{i}\\)).2 It then passes these select examples, \\(R_{i}\\), along with the most recent memory content, \\(M_{i-1}\\), to the curator to update the memory, that is to get \\(M_{i}\\). Finally, it uses the generator to produce \\(\\tilde{y}_{i}\\), given \\(x_{i}\\) and \\(M_{i}\\). 
We summarize all these steps below:" + }, + { + "type": "equation", + "bbox": [ + 0.172, + 0.432, + 0.475, + 0.45 + ], + "angle": 0, + "content": "\\[\nR _ {i} = \\operatorname {R e t r} \\left(x _ {i}, \\left\\{\\left(x _ {j}, \\tilde {y} _ {j}\\right) \\right\\} _ {j < i}, k\\right) \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.173, + 0.452, + 0.475, + 0.468 + ], + "angle": 0, + "content": "\\[\nM _ {i} = \\operatorname {C u r} \\left(M _ {i - 1}, x _ {i}, R _ {i}\\right) \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.176, + 0.47, + 0.475, + 0.487 + ], + "angle": 0, + "content": "\\[\n\\tilde {y} _ {i} = \\operatorname {G e n} \\left(x _ {i}, M _ {i}\\right) \\tag {5}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.506, + 0.185, + 0.519 + ], + "angle": 0, + "content": "2.3. Baselines" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.529, + 0.476, + 0.559 + ], + "angle": 0, + "content": "To quantify the efficacy of memory-driven test-time learning, we compare DC and its variants to four baselines:" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.567, + 0.476, + 0.626 + ], + "angle": 0, + "content": "(1) Baseline prompting (BL). This plain \"vanilla\" prompting approach, with minimal instructions, simply prompts the model without any iterative memory or retrieval mechanism. It reflects traditional one-off inference.\\(^3\\)" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.634, + 0.475, + 0.74 + ], + "angle": 0, + "content": "(2) DC-\\(\\varnothing\\) (empty memory). To isolate the effect of memory curation, this DC baseline always keeps the memory content effectively empty. \\(^4\\)DC-\\(\\varnothing\\) allows us to measure how much performance improvement arises purely from storing and reusing knowledge over time. While there is no continuous knowledge storage or strategy reuse, this method follows the instructions in Figure 13 and is therefore a strong baseline." 
+ }, + { + "type": "list", + "bbox": [ + 0.085, + 0.567, + 0.476, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.085, + 0.749, + 0.473, + 0.776 + ], + "angle": 0, + "content": "1We used OpenAI's text-embedding-3-small model to map input queries (raw questions) to embedding vectors." + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.776, + 0.473, + 0.803 + ], + "angle": 0, + "content": "2We set \\(k\\) to 3 in all our experiments. (Initially, we considered higher top- \\(k\\) values such as 5 and 7, but the gain was insignificant.)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.803, + 0.475, + 0.867 + ], + "angle": 0, + "content": "3Please refer to Figure 12 to see the full instruction (prompt) used in BLh. We experimented with the zero-shot CoT approach (Kojima et al., 2022) in our preliminary experiments, but it did not yield any gains (Arcuschin et al., 2025). We, therefore, did not include it as a baseline method in our experiments." + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.867, + 0.476, + 0.905 + ], + "angle": 0, + "content": "4We adopt the generator prompt template used in DC-RS, namely Figure 13, for DC- \\(\\emptyset\\) , though we replace the memory placeholder with the text \"empty cheatsheet)." + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.749, + 0.476, + 0.905 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.886, + 0.176 + ], + "angle": 0, + "content": "(3) Full-History Appending (FH). This is a naive approach that appends the entire conversation history to the model input without any curation or truncation. FH can exceed context-window limits and include redundant or low-value information, but nonetheless, it provides a useful comparison for methods that actively curate content." 
+ }, + { + "type": "text", + "bbox": [ + 0.497, + 0.183, + 0.887, + 0.273 + ], + "angle": 0, + "content": "(4) Dynamic Retrieval (DR). A final baseline uses retrieval but no curation. Specifically, for each new query, it retrieves the most similar past interactions and directly pastes them, verbatim, into the prompt. DR can help the model see relevant input-output pairs but not directly codify any abstract or generalized solutions.7" + }, + { + "type": "list", + "bbox": [ + 0.497, + 0.085, + 0.887, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.282, + 0.886, + 0.312 + ], + "angle": 0, + "content": "Figure 3 (above) contains pseudocodes of all the primary methods and baselines considered in this paper." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.331, + 0.69, + 0.349 + ], + "angle": 0, + "content": "3. Experimental Setup" + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.357, + 0.665, + 0.371 + ], + "angle": 0, + "content": "3.1. Tasks and Datasets" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.38, + 0.888, + 0.532 + ], + "angle": 0, + "content": "To rigorously evaluate DC's effectiveness, we focus on challenging tasks where contemporary state-of-the-art LLMs, such as GPT-4o and Claude 3.5, still face limitations. Rather than evaluating on benchmarks where performance is near saturation (e.g., BBH (Suzgun et al., 2023b), MGSM (Shi et al., 2023), GSM8K (Cobbe et al., 2021)), we prioritize tasks that demand multi-step reasoning, heuristic search, strategic adaptation, and cumulative learning—that is, tasks in which iterative memory refinement can yield tangible improvements over time.\\(^{8}\\)" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.539, + 0.888, + 0.585 + ], + "angle": 0, + "content": "Overall, the selected datasets include algorithmic, logical, and domain-specific reasoning tasks, each chosen to stress-test the model's ability to refine its reasoning over time." 
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.592, + 0.888, + 0.713 + ], + "angle": 0, + "content": "(a) AIME 2020-2025 Exam Questions: The American Invitational Mathematics Examination (AIME) is a prestigious high-school competition featuring complex problems across algebra, combinatorics, number theory, geometry, and probability. These questions require deep mathematical reasoning and multi-step problem-solving. We consider three subsets: AIME \\(2024^{9}\\) (30 questions), AIME \\(2025^{10}\\) (30 questions), and AIME \\(2020 - 2024^{11}\\) (133 questions)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.496, + 0.722, + 0.887, + 0.761 + ], + "angle": 0, + "content": "5We consider and test this baseline only on AIME 2024 and AIME 2025, which are relatively small in their size (each contains 30 examples) compared to other benchmarks." + }, + { + "type": "page_footnote", + "bbox": [ + 0.496, + 0.761, + 0.885, + 0.8 + ], + "angle": 0, + "content": "6We use the generator prompt template in Figure 13 again, but include the entire raw input-output pairs from the previous steps in the memory—without any curation, truncation, or synthesis." + }, + { + "type": "page_footnote", + "bbox": [ + 0.496, + 0.8, + 0.888, + 0.827 + ], + "angle": 0, + "content": "\\(^{7}\\mathrm{FH}\\) is similar to DR, but we include only a select (most relevant) input-output pairs in the memory content." + }, + { + "type": "page_footnote", + "bbox": [ + 0.496, + 0.827, + 0.887, + 0.853 + ], + "angle": 0, + "content": "8We release all the original input-output pairs in our codebase: http://github.com/suzgunmirac/dynamic-cheatsheet." + }, + { + "type": "page_footnote", + "bbox": [ + 0.521, + 0.853, + 0.875, + 0.866 + ], + "angle": 0, + "content": "\\(^{9}\\)huggingface.co/datasets/HuggingFaceH4/aime_2024" + }, + { + "type": "page_footnote", + "bbox": [ + 0.521, + 0.867, + 0.856, + 0.88 + ], + "angle": 0, + "content": "10huggingface.co/datasets/yentinglin/aime_2025." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.52, + 0.88, + 0.905, + 0.894 + ], + "angle": 0, + "content": "11huggingface.co/datasets/di-zhang-fdu/AIME_1983_2024." + }, + { + "type": "list", + "bbox": [ + 0.496, + 0.722, + 0.905, + 0.894 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.477, + 0.206 + ], + "angle": 0, + "content": "(b) GPQA-Diamond (Rein et al., 2024): A high-quality, difficult subset of the Graduate-Level Google-Proof Q&A (GPQA) benchmark, GPQA-Diamond contains 198 expert-validated questions across natural sciences, including biology, chemistry, and physics. These questions were correctly answered by domain experts but often missed by non-experts, making them ideal for evaluating DC's ability to handle complex, multi-hop reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.213, + 0.476, + 0.35 + ], + "angle": 0, + "content": "(c) Game of 24 (Yao et al., 2023; Suzgun & Kalai, 2024): A heuristic-driven arithmetic challenge where the objective is to form an expression that evaluates to 24 using four given numbers exactly once. For instance, if the input values were \"7 7 8 11,\" one valid answer would be \"8*(7+7-11).\" This task emphasizes systematic search, strategic reasoning, and pattern recognition. We use the 100 examples from (Suzgun & Kalai, 2024) to assess DC's capacity for refining computational heuristics and strategy over manual attempts." 
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.357, + 0.476, + 0.478 + ], + "angle": 0, + "content": "(d) Math Equation Balancer: Focused on elementary arithmetic reasoning, this dataset requires the model to complete equations by inserting the appropriate operators to form valid expressions. The task emphasizes the sequential placement of operators, as illustrated by the example “1 ? 2 ? 3 = 6,” where the model must identify the correct operators to satisfy the equation (“1 + 2 + 3 = 6” or “1 * 2 * 3 = 6”). We compiled 250 arithmetic expressions for this task." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.485, + 0.476, + 0.576 + ], + "angle": 0, + "content": "(e) MMLU-Pro (Engineering and Physics) (Wang et al., 2024b): A professional-level subset of the MMLU benchmark focused on physics and engineering. All questions are presented in a multiple-choice form. The original dataset contains 1,299 physics and 969 engineering questions. We sampled 250 questions from each subset." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.593, + 0.245, + 0.608 + ], + "angle": 0, + "content": "3.2. Language Models" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.616, + 0.476, + 0.707 + ], + "angle": 0, + "content": "We evaluate the efficacy of DC across a range of language models. Our selection includes both state-of-the-art LLMs such as GPT-4o and Claude 3.5 Sonnet and their smaller-scale counterparts (namely, GPT-4o-mini and Claude 3.5 Haiku), as well as models such as DeepSeek R1 that are designed specifically for reasoning-intensive tasks." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.724, + 0.26, + 0.738 + ], + "angle": 0, + "content": "3.3. Evaluation Protocol" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.747, + 0.476, + 0.808 + ], + "angle": 0, + "content": "To ensure standardized and reliable evaluation, all models are instructed to format their final answers in a structured, machine-readable format. 
All model answers are expected to be wrapped in the following XML-style tags:" + }, + { + "type": "code", + "bbox": [ + 0.119, + 0.813, + 0.236, + 0.856 + ], + "angle": 0, + "content": " (final answer) " + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.861, + 0.476, + 0.906 + ], + "angle": 0, + "content": "This explicit format ensures accurate and consistent parsing, eliminating errors arising from extraneous text or ambiguous outputs. Once extracted, the final answers are evaluated" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.085, + 0.866, + 0.101 + ], + "angle": 0, + "content": "using their corresponding task-specific accuracy metric." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.115, + 0.669, + 0.13 + ], + "angle": 0, + "content": "3.3.1. Accuracy Metrics" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.139, + 0.887, + 0.169 + ], + "angle": 0, + "content": "Given the diversity of the tasks, we use different accuracy metrics tailored to the specific requirements of each dataset." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.176, + 0.888, + 0.268 + ], + "angle": 0, + "content": "Soft Match (SM) is a lenient metric that considers an answer correct if it matches the ground truth after ignoring minor formatting differences, such as punctuation or whitespace variations. We apply this metric to GPQA-Diamond, and MMLU Pro (Engineering and Physics), in which questions are presented in a multiple-choice format." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.274, + 0.888, + 0.365 + ], + "angle": 0, + "content": "Functionally Correct (FC) is an even more flexible metric that evaluates whether the model's output satisfies the task-specific constraints, even if the exact numeral presentation or formatting differs slightly from the reference solution. We apply this metric to the Game of 24, Math Equation Balancer, and AIME benchmarks." 
+ }, + { + "type": "title", + "bbox": [ + 0.498, + 0.385, + 0.634, + 0.401 + ], + "angle": 0, + "content": "4. Main Results" + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.411, + 0.825, + 0.441 + ], + "angle": 0, + "content": "4.1. DC enables test-time learning and reduces repetitive errors" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.449, + 0.888, + 0.631 + ], + "angle": 0, + "content": "One of the most compelling illustrations of DC's capabilities emerges from the Game of 24 task. As seen in Table 1, GPT-4o's baseline accuracy on this arithmetic puzzle was just \\(10\\%\\). Under DC-RS, its performance increased to \\(99\\%\\), illustrating DC's capacity for test-time learning and iterative refinement. Early in the task sequence, GPT-4o discovered a reliable, Python-based brute-force method to solve Game of 24 and later on recognized the repetitive structure of the problem. The model then encoded this approach into its memory. Once established, GPT-4o consistently retrieved and applied the more or less same Python solution for subsequent examples, leading to rapid and accurate results." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.638, + 0.887, + 0.773 + ], + "angle": 0, + "content": "The performance under DC-\\(\\varnothing\\) (19%) further highlights the positive impact of memory curation and retrieval. DC-\\(\\varnothing\\) uses the same core generator but keeps the memory empty, thus lacking the mechanism to store and reuse solutions. The large gap between 19% (DC-\\(\\varnothing\\)) and 99% (DC-RS) confirms that effective memory usage, in which past solutions are retrieved and generalized, is the main driver of GPT-4o's transformation from ad-hoc solver to near-perfect performer in Game of 24." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.781, + 0.888, + 0.888 + ], + "angle": 0, + "content": "In contrast, Claude 3.5 Sonnet showed marginal gain, moving from \\(12\\%\\) to \\(14\\%\\). 
Despite DC's scaffolding, Claude did not internalize a generalized approach but instead continued to rely on manual arithmetic solutions. This underscores that while DC provides the framework for test-time adaptation, its ultimate success hinges on the model's innate capacity to identify and encode robust, reusable strategies." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.072 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "table", + "bbox": [ + 0.119, + 0.082, + 0.855, + 0.255 + ], + "angle": 0, + "content": "
TasksClaude 3.5 SonnetGPT-4o
BLDC-∅DRDC-Cu.DC-RSBLDC-∅DRDC-Cu.DC-RS
AIME 202423.336.743.350.046.720.036.726.736.740.0
AIME 20256.723.323.336.730.06.710.010.016.720.0
AIME 2020–246.730.139.138.440.69.824.124.120.324.8
Game of 2412.010.011.014.014.010.019.06.093.099.0
GPQA Diamond59.660.163.661.168.757.157.155.158.157.1
Math Eqn. Balancer44.856.460.410097.850.088.010010099.2
MMLU Pro Eng.61.257.265.266.867.653.251.648.844.051.2
MMLU Pro Physics74.075.680.477.682.075.670.875.670.475.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.266, + 0.888, + 0.343 + ], + "angle": 0, + "content": "Table 1: Performance comparison of Dynamic Cheatsheet (DC) variants for Claude 3.5 Sonnet and GPT-4o across multiple benchmarks. BL (Baseline): standard inference without memory; DC-∅ (Empty Memory): includes structured problem-solving and explicit tool-use instructions but no memory retention mechanism; DR (Dynamic Retrieval): uses retrieval but lacks curated memory updates; DC-Cu (Cumulative Memory): iteratively accumulates model solutions but lacks retrieval; and DC-RS (Retrieval & Synthesis): combines retrieval with memory refinement/synthesis. These results highlight substantial accuracy gains under DC: Claude 3.5 Sonnet's AIME 2024 accuracy jumps by \\(27\\%\\) under DC-Cu, and GPT-4o's Game of 24 accuracy leaps from \\(10\\%\\) to \\(99\\%\\) under DC-RS." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.352, + 0.437, + 0.383 + ], + "angle": 0, + "content": "4.2. DC provides substantial improvements across various challenging reasoning benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.391, + 0.475, + 0.452 + ], + "angle": 0, + "content": "Beyond Game of 24, DC yielded significant gains across a range of complex mathematical and algorithmic tasks. See Table 1. The results below illustrate how iterative solution reuse can helpful in complex reasoning problems." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.459, + 0.477, + 0.671 + ], + "angle": 0, + "content": "AIME Exam Problems. The AIME exams provided some of the most dramatic improvements under DC. For Claude 3.5 Sonnet, performance on AIME 2020-2024 surged from \\(6.7\\%\\) to \\(40.6\\%\\) under DC-RS. A similar upward trend appeared on AIME 2024 (23.3% to \\(50.0\\%\\)) and AIME 2025 (6.7% to \\(36.7\\%\\)) under DC-Cu. 
DC-Cu, where the model curates memory after processing the input and does not involve a retrieval stage, also proved potent in recent exam sets, achieving highest accuracy scores in AIME 2024 and 2025. GPT-4o also showed some noteworthy gains. Its AIME 2024 performance raised from \\(20.0\\%\\) to \\(40.0\\%\\) under DC-RS, while its AIME 2025 score climbed from \\(6.7\\%\\) to \\(20.0\\%\\). These boosts suggest that structured test-time-produced memory can help tackle difficult math problems." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.678, + 0.476, + 0.875 + ], + "angle": 0, + "content": "GPQA-Diamond. On GPQA-Diamond, Claude 3.5 Sonnet improved from \\(59.6\\%\\) to \\(68.7\\%\\) under DC-RS, a robust \\(9.1\\%\\) gain purely from test-time adaptation. DR \\((63.6\\%)\\) demonstrated that retrieval alone helps, but the further jump to \\(68.7\\%\\) highlights how memory curation and synthesis can yield additional benefits. By contrast, GPT-4o experienced only a slight increase from \\(57.1\\%\\) to \\(58.1\\%\\) with DC-RS; our quantitative analysis of the model's outputs and memory showed us that retrieval can, in some cases, introduce confusion, especially if suboptimal examples are recalled. This contrast between different models underscores how the success of retrieval-based adaptation partly depends on model-specific generation and curation capabilities." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.881, + 0.476, + 0.897 + ], + "angle": 0, + "content": "Math Equation Balancer. As Table 1 shows, the base-" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.352, + 0.888, + 0.459 + ], + "angle": 0, + "content": "line performance for Claude 3.5 Sonnet (44.8%) rose to \\(98 - 100\\%\\) with DC-RS and DC-Cu, while GPT-4o similarly improved from \\(50.0\\%\\) to near-perfect accuracy (99-100%). 
As observed in Game of 24, the models quickly learned an algorithmic or Python-based balancing routine, stored it in external memory, and repeatedly retrieved it, achieving exceptional consistency once the core method was established." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.465, + 0.889, + 0.631 + ], + "angle": 0, + "content": "MMLU-Pro Tasks. For MMLU-Pro Eng. and Physics, Claude 3.5 Sonnet exhibited consistent gains, rising by up to \\(8.0\\%\\) in Physics (from \\(74\\%\\) to \\(82\\%\\)). Our examination of the curated memory entries shows that Claude temporarily stored and retrieved compact \"reference guides\" on engineering and physics principles, which might have proved beneficial for thematically similar questions. GPT-4o, on the other hand, observed slight decreases from the baseline on these tasks, suggesting that domain complexity and baseline knowledge gaps may attenuate DC's benefits if curated memory is less reliable or consistent." + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.648, + 0.877, + 0.68 + ], + "angle": 0, + "content": "4.3. Memory curation (DC) fosters generalization and provides gains over full-history-appending (FH)" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.687, + 0.889, + 0.899 + ], + "angle": 0, + "content": "Whereas FH (full-history) simply appends every previous dialogue turn into the prompt, DC actively filters and synthesizes high-value content. As shown in Table 2, Sonnet under FH reached \\(26.7\\%\\) accuracy in 2024 questions, while DC-based methods hit \\(50.0\\%\\). Similarly, GPT-4o managed a baseline of \\(20.0\\%\\) but fell to \\(6.7\\%\\) using FH, in direct contrast to \\(40.0\\%\\) with DC-RS. Excessive uncurated input-output pairs can not only overwhelm the model's context window, dilute crucial insights and hamper retrieval efficiency, but also significantly increase inference costs over time. 
On the other hand, DC's selective memory curation ensures that problem-solving tips or code snippets remain readily accessible without clutter, thus facilitating more robust and consistent improvements across consecutive queries." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.695, + 0.072 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.089, + 0.464, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.495, + 0.477, + 0.586 + ], + "angle": 0, + "content": "Figure 5: Excerpt from GPT-4o's external memory after processing 100 examples from Game of 24 under DC-RS. Early in the test sequence, the model discovered a Python-based brute-force solution, stored it, and subsequently retrieved it for subsequent puzzles. This shift to structured code reuse resulted in a dramatic performance increase from \\(10\\%\\) to \\(99\\%\\) accuracy, eliminating arithmetic errors and redundant problem-solving efforts." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.604, + 0.449, + 0.619 + ], + "angle": 0, + "content": "4.4. DC fosters efficient tool usage / code generation" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.627, + 0.477, + 0.763 + ], + "angle": 0, + "content": "A successful behavior under DC is the LLMs' inclination toward code generation to handle computationally intensive tasks. GPT-4o's near-complete reliance on Python scripts for Game of 24 exemplifies this shift. Rather than performing manual arithmetic repeatedly, GPT-4o recognized that code-based brute force is more systematic. It generated, stored, and iteratively refined a Python function that tested permutations of numbers and operations, allowing it to solve each instance of Game of 24 with high accuracy." 
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.77, + 0.479, + 0.907 + ], + "angle": 0, + "content": "This inclination toward automation illustrates DC's potential to nurture efficient tool-usage: the capacity to recognize when external tools (e.g., Python, symbolic math engines, or dedicated solvers) are more robust than internally verbalized chain-of-thought calculations. While we restricted the scope of tool usage to Python interpreter in this study, future expansions could easily explore a broader suite of tools, potentially amplifying LLM performance in specialized domains such as computational biology or legal research." + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.082, + 0.885, + 0.157 + ], + "angle": 0, + "content": "
TasksClaude 3.5 SonnetGPT-4o
BLFHDC-Cu.BLFHDC-RS
AIME 202423.326.750.020.013.340.0
AIME 20256.76.736.76.73.320.0
" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.166, + 0.887, + 0.256 + ], + "angle": 0, + "content": "Table 2: Performance breakdown of BL (default baseline), FH (full history), DC-Cu, and DC-RS approaches under AIME 2024 and 2025. FH stores all past queries and outputs, while DC-Cu and DC-RS selectively refine stored memory. Results indicate that targeted memory curation in DC-RS leads to greater accuracy gains compared to full history retention, supporting the need for structured, self-updating knowledge mechanisms." + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.267, + 0.878, + 0.283 + ], + "angle": 0, + "content": "4.5. Model scale and capacity impact DC effectiveness" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.29, + 0.887, + 0.382 + ], + "angle": 0, + "content": "Our current results indicate that the effectiveness of DC is strongly tied to the model's scale and underlying generative capacity. While Claude 3.5 Sonnet and GPT-4o showed notable gains across multiple tasks under DC, their smaller counterparts, Claude 3.5 Haiku and GPT-4o-mini, showed more limited and inconsistent gains." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.388, + 0.889, + 0.48 + ], + "angle": 0, + "content": "Table 3, for instance, shows that Claude 3.5 Haiku achieved moderate gains under DC, with its accuracy on AIME 2024 rising from \\(10.0\\%\\) (baseline) to \\(36.7\\%\\) under DC-Cu. But gains on AIME 2025 were weaker, reaching only \\(13.3\\%\\) under DC-\\(\\varnothing\\) and DC-Cu. 
Interestingly, GPQA-Diamond saw an improvement from \\(43.4\\%\\) to \\(49.0\\%\\) under DC-RS," + }, + { + "type": "title", + "bbox": [ + 0.511, + 0.513, + 0.729, + 0.524 + ], + "angle": 0, + "content": "GENERAL META-REASONING STRATEGIES" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.527, + 0.598, + 0.537 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.544, + 0.584, + 0.555 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.556, + 0.808, + 0.566 + ], + "angle": 0, + "content": "Systematic Problem Analysis Framework (Reference: Q1-Q20)" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.567, + 0.696, + 0.578 + ], + "angle": 0, + "content": "For complex mathematical problems:" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.579, + 0.696, + 0.59 + ], + "angle": 0, + "content": "1. State problem requirements clearly" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.591, + 0.747, + 0.602 + ], + "angle": 0, + "content": "2. List key observations and theorems applicable" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.602, + 0.693, + 0.613 + ], + "angle": 0, + "content": "3. Identify patterns and relationships" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.614, + 0.708, + 0.625 + ], + "angle": 0, + "content": "4. Break into manageable sub-problems" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.626, + 0.641, + 0.636 + ], + "angle": 0, + "content": "5. Verify against examples" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.637, + 0.864, + 0.648 + ], + "angle": 0, + "content": "6. Consider computational approach when analytical solution is complex" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.649, + 0.83, + 0.659 + ], + "angle": 0, + "content": "7. For grid problems, analyze movement patterns and symmetries" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.66, + 0.841, + 0.671 + ], + "angle": 0, + "content": "8. 
For combinatorial problems, use appropriate counting techniques" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.672, + 0.734, + 0.682 + ], + "angle": 0, + "content": "9. Implement verification code when possible" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.683, + 0.704, + 0.694 + ], + "angle": 0, + "content": "10. Consider edge cases and constraints" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.695, + 0.805, + 0.706 + ], + "angle": 0, + "content": "11. For grid coloring problems, consider row/column patterns" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.707, + 0.588, + 0.717 + ], + "angle": 0, + "content": "" + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.579, + 0.864, + 0.717 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.722, + 0.572, + 0.733 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.734, + 0.62, + 0.745 + ], + "angle": 0, + "content": "Example application:" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.746, + 0.708, + 0.756 + ], + "angle": 0, + "content": "1. Requirements: list all given conditions" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.757, + 0.734, + 0.767 + ], + "angle": 0, + "content": "2. Observations: identify applicable theorems" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.768, + 0.722, + 0.779 + ], + "angle": 0, + "content": "3. Patterns: look for structural relationships" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.677, + 0.791 + ], + "angle": 0, + "content": "4. Sub-problems: break into steps" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.792, + 0.691, + 0.802 + ], + "angle": 0, + "content": "5. Verification: test against examples" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.803, + 0.737, + 0.814 + ], + "angle": 0, + "content": "6. 
Implementation: use Python for verification" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.815, + 0.575, + 0.825 + ], + "angle": 0, + "content": "" + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.746, + 0.737, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.831, + 0.602, + 0.841 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.842, + 0.563, + 0.852 + ], + "angle": 0, + "content": "Count: 20" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.868, + 0.887, + 0.92 + ], + "angle": 0, + "content": "Figure 6: Example of Claude 3.5 Sonnet's curated memory after processing 20 AIME 2024 questions under DC-Cu. The memory captures key solution strategies, enables the model to generalize across similar computational problems, and boosts its accuracy." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.923, + 0.492, + 0.935 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.085, + 0.482, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.489, + 0.084, + 0.877, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.272, + 0.888, + 0.325 + ], + "angle": 0, + "content": "Figure 7: Cumulative performance progression under DC for GPQA-Diamond (left) and Game of 24 (right). In GPQA-Diamond, Claude 3.5 Sonnet steadily improves as it accumulates relevant knowledge snippets (the first few points are noisy because \\(y\\) measures cumulative accuracy). Meanwhile, in Game of 24, GPT-4o rapidly transitions from trial-and-error arithmetic to near-perfect performance once it recognizes and stores a Python-based solution. 
These trends highlight DC's ability to enhance accuracy via iterative test-time learning." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.333, + 0.475, + 0.363 + ], + "angle": 0, + "content": "suggesting that retrieval-based adaptation might still provide utility in smaller models." + }, + { + "type": "table", + "bbox": [ + 0.107, + 0.379, + 0.455, + 0.56 + ], + "angle": 0, + "content": "
TasksClaude 3.5 Haiku
BLDC-∅DC-Cu.DC-RS
AIME 202410.026.736.730.0
AIME 20250.013.313.310.0
GPQA-Diamond43.441.943.749.0
TasksGPT-4o-mini
BLDC-∅DC-Cu.DC-RS
AIME 202416.720.013.313.3
AIME 202510.013.313.316.7
GPQA-Diamond34.334.333.832.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.568, + 0.475, + 0.658 + ], + "angle": 0, + "content": "Table 3: Performance of Claude 3.5 Haiku and GPT-4o-mini, the smaller counterparts of Claude 3.5 Sonnet and GPT-4o, across AIME (2024, 2025) and GPQA-Diamond. These smaller models struggle to fully leverage DC, suggesting that memory-based adaptation is most effective when the base LM has sufficient generative competence. Performance improvements are more muted, highlighting the dependency of DC on model-scale reasoning ability." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.664, + 0.477, + 0.801 + ], + "angle": 0, + "content": "That said, GPT-4o-mini (Table 3) showed even smaller gains, with some variants leading to slight declines in performance. On AIME 2024, DC- \\(\\varnothing\\) provided a \\(20.0\\%\\) boost, but both DC-Cu and DC-RS performed worse than baseline. AIME 2025 showed a minor improvement, peaking at \\(16.7\\%\\) under DC-RS. On GPQA-Diamond, GPT-4o-mini's performance, however, remained largely stagnant or slightly declined under memory-based adaptation, suggesting that it struggled to leverage stored information effectively." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.808, + 0.468, + 0.822 + ], + "angle": 0, + "content": "These imply two drawbacks of smaller models under DC:" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.831, + 0.476, + 0.906 + ], + "angle": 0, + "content": "(a) Generative competence. For DC to be effective, the base model must produce correct solutions with sufficient frequency to populate the memory with high-quality, reusable strategies. Smaller models, such as GPT-4o-mini and Claude 3.5 Haiku, generate correct solutions less reliably," + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.333, + 0.888, + 0.379 + ], + "angle": 0, + "content": "leading to a sparse or low-quality memory repository. 
As a result, iterative refinement stalls because the stored knowledge consists mostly of incorrect or partial attempts." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.385, + 0.889, + 0.537 + ], + "angle": 0, + "content": "(b) Contextual and memory curation limitations. Smaller models struggle with long-context understanding/generation and memory retrieval, leading to inefficient or irrelevant memory usage. Unlike their larger counterparts, which can more effectively retrieve and synthesize solutions from stored heuristics, smaller models often fail to retrieve the most relevant past solutions or misapply retrieved knowledge to new problems. This results in inconsistent performance under DC-RS, particularly in tasks requiring complex reasoning or strategic adaptation." + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.553, + 0.887, + 0.584 + ], + "angle": 0, + "content": "4.6. Test-time task similarity and example ordering can amplify DC's overall impact" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.592, + 0.889, + 0.803 + ], + "angle": 0, + "content": "Another central insight is that DC thrives when test examples share structural similarities. In both Game of 24 and Math Equation Balancer, once GPT-4o identified an efficient solution, it reused it consistently for subsequent tasks. Similarly, in AIME, discovering a geometry or combinatorics strategy allowed for easy transfer across questions of analogous structure. Consequently, tasks arranged to present related questions early may accelerate and improve the model's test-time learning. This suggests that curriculum-style learning (Bengio et al., 2009), where simpler or archetypal problems are presented first to build a repository of valid heuristics, may potentially bootstrap performance. Cf. (Lopez-Paz & Ranzato, 2017; Zelikman et al., 2022; Chen et al., 2024)" + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.822, + 0.829, + 0.84 + ], + "angle": 0, + "content": "5. 
Additional Analyses and Discussions" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.848, + 0.886, + 0.895 + ], + "angle": 0, + "content": "Reasoning and information efficiency. One key insight is that DC reduces the need to \"reinvent the wheel\" for each query. By encoding and reusing well-established techniques" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.475, + 0.162 + ], + "angle": 0, + "content": "(e.g., Python-based solving for Game of 24), models can bypass repeated rediscovery of the same strategies. This significantly cuts down reasoning overhead and token usage in subsequent queries, though the initial cost of discovering a robust approach and curating it remains non-trivial." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.169, + 0.476, + 0.41 + ], + "angle": 0, + "content": "DC performs better than majority voting (MV). To test if DC provides advantages over conventional MV at inference, we also tested Sonnet on AIME 2024 and 2025 using both approaches. MV, which selects the most common answer from three independent generations, yielded no improvements over single-shot inference. As seen in Table 4, on AIME 2024, MV performed identically to the baseline \\((23.3\\%)\\), while on AIME 2025, it remained at \\(6.7\\%\\), offering no tangible gain. Even with DC-\\(\\emptyset\\), MV slightly underperformed \\((33.3\\%\\) vs. \\(36.7\\%)\\). In contrast, DC-Cu outperformed MV, reaching \\(50.0\\%\\) on AIME 2024 and \\(36.7\\%\\) on AIME 2025. Unlike MV, which passively aggregates outputs, DC actively refines knowledge over time, eliminating errors and improving solution quality. 
This confirms that memory-based adaptation is far more effective than simple statistical voting in complex reasoning tasks." + }, + { + "type": "table", + "bbox": [ + 0.09, + 0.424, + 0.475, + 0.497 + ], + "angle": 0, + "content": "
TasksClaude 3.5 Sonnet
BLMV(BL)DC-∅MV(DC-∅)DC-Cu.
AIME 202423.323.3336.733.350.0
AIME 20256.76.723.323.336.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.506, + 0.475, + 0.521 + ], + "angle": 0, + "content": "Table 4: Comparison of majority voting (MV) with DC on AIME." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.535, + 0.475, + 0.672 + ], + "angle": 0, + "content": "Clustering of errors and corrections. Our experiments suggest that errors and their corrections often cluster in a latent embedding space. See Figure 10. Once a model acquires a high-quality heuristic for a cluster of related queries, it can apply this knowledge to tightly embedded neighbors. However, faulty heuristics that slip into memory can be equally amplified. Ensuring that the memory remains \"clean\" thus requires careful curation and, if necessary, pruning to avoid propagating erroneous strategies." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.678, + 0.475, + 0.83 + ], + "angle": 0, + "content": "Transferability of memory content across models. We also observed that larger models, such as Claude 3.5 Sonnet and GPT-4o, can sometimes produce higher-quality strategies that, in principle, could benefit smaller models if the memory is transferred. However, if a smaller model lacks the generative capacity to interpret or refine those strategies correctly, its performance can stall or degrade. In our ablation experiments, we observed mixed results. This indicates that memory entries, while helpful, cannot fully compensate for inadequate base capability." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.837, + 0.475, + 0.882 + ], + "angle": 0, + "content": "Long-context generation versus understanding. Most large LLMs excel at processing lengthy inputs but struggle to generate comparably long\\(^{12}\\) and well-organized outputs." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.085, + 0.887, + 0.221 + ], + "angle": 0, + "content": "DC's memory curation after each query can demand precise reproduction or modification of prior knowledge. 
We observed instances where the model merely references or abbreviates the existing memory (e.g., \"Previous content [...] preserved\") instead of explicitly rewriting it. Such truncated memory updates can reduce the quality of stored heuristics over time. Potential solutions include maintaining a structured, external database that the LM can reference without regenerating large swaths of text each time." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.228, + 0.887, + 0.395 + ], + "angle": 0, + "content": "Retrieval bottlenecks and noise. While retrieval-based variants (e.g., DC-RS) can substantially improve accuracy, poorly filtered retrieval mechanisms can introduce confusion, particularly when presented with highly diverse or loosely related queries. For example, in our experiments, GPT-4o's performance occasionally dipped in GPQA-Diamond due to suboptimal retrieval choices. This underscores the importance of robust retrieval methods (e.g., dense vector search, advanced ranking algorithms) that can reliably surface higher quality exemplars or heuristics while suppressing irrelevant or contradictory texts." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.402, + 0.887, + 0.538 + ], + "angle": 0, + "content": "Hierarchical and modular memory. As LLM deployments scale, specialized domains may benefit from subdividing or hierarchically organizing memory. For instance, a system could maintain separate curated memories for topics like combinatorics or physics, each updated by a specialized retrieval or curation mechanism. This may reduce the load on a unified memory store and help isolate errors within their respective domains, with the goal of further improving the clarity and reliability of retrieved heuristics." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.546, + 0.887, + 0.651 + ], + "angle": 0, + "content": "Time and token complexity. 
Although DC requires memory curation after each query, it optimizes efficiency over time by reducing redundant computation and token usage.[13] As the model retrieves and refines solutions, memory maintenance becomes a net gain rather than a cost. However, its sequential structure still poses challenges for large-scale parallel or batch tasks requiring independent inference." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.659, + 0.887, + 0.855 + ], + "angle": 0, + "content": "Smaller or more specialized models and R1 experiments. Finally, we note that smaller models, such as GPT-4o-mini, show limited gains under DC, as seen in Table 3. Additional experiments with \"R1\" models such as DeepSeek R1 and o1 similarly showed minimal or inconsistent improvements. In these cases, these models' generative ability appears too restricted to produce reliable strategies for storage or to interpret retrieved heuristics effectively. The solutions were far too verbose and long. Without sufficiently accurate and efficient base solutions, memory curation cannot yield substantial gains. This limitation ties back to the core premise that effective DC demands a capable foundation model to seed and refine the curated knowledge." + }, + { + "type": "page_footnote", + "bbox": [ + 0.497, + 0.864, + 0.885, + 0.89 + ], + "angle": 0, + "content": "13On AIME 2024, Claude Sonnet averaged 370 tokens under BL, 494 under DC-\\(\\emptyset\\), 1035 under DC-RS, and 1831 under DC-Cu." + }, + { + "type": "page_footnote", + "bbox": [ + 0.106, + 0.89, + 0.285, + 0.905 + ], + "angle": 0, + "content": "12See, e.g., (Liu et al., 2024b)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.072 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.477, + 0.236 + ], + "angle": 0, + "content": "Overall, DC offers a useful and practical framework for continuous, test-time learning in LLMs. Our findings emphasize the synergy between model capacity and memory curation, the importance of structural task similarity and retrieval precision, and the benefits of offloading repeated computations to flexible external stores (e.g., Python scripts). At the same time, alternative mechanisms (e.g., specialized sub-memories or adaptive example ordering) and more sophisticated retrieval techniques (e.g., topological clustering) remain promising directions for further research." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.256, + 0.245, + 0.273 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.281, + 0.476, + 0.373 + ], + "angle": 0, + "content": "We thank Batu El, Sabri Eyuboglu, Tayfun Gur, Emily Shen, Jake Silberg, Elana Simon, and Kyle Swanson for their helpful comments and suggestions. We also thank the members of the James Zou Lab at Stanford for their feedback in the early stages of this project. Suzgun gratefully acknowledges the support of an HAI-SAP Fellowship." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.392, + 0.182, + 0.407 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.416, + 0.476, + 0.446 + ], + "angle": 0, + "content": "Amari, S.-I. Natural gradient works efficiently in learning. Neural computation, 10(2):251-276, 1998." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.463, + 0.477, + 0.539 + ], + "angle": 0, + "content": "Arcuschin, I., Janiak, J., Krzyzanowski, R., Rajamanoharan, S., Nanda, N., and Conmy, A. Chain-of-thought reasoning in the wild is not always faithful. In Workshop on Reasoning and Planning for Large Language Models, 2025. URL https://openreview.net/forum?id=L8094Whth0." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.554, + 0.476, + 0.616 + ], + "angle": 0, + "content": "Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate, and critique through self-reflection. In The Twelfth International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.631, + 0.476, + 0.69 + ], + "angle": 0, + "content": "Bengio, Y., Louradour, J., Collobert, R., and Weston, J. Curriculum learning. In Proceedings of the 26th annual international conference on machine learning, pp. 41-48, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.709, + 0.476, + 0.8 + ], + "angle": 0, + "content": "Besta, M., Blach, N., Kubicek, A., Gerstenberger, R., Podstawski, M., Gianinazzi, L., Gajda, J., Lehmann, T., Niewiadomski, H., Nczyk, P., et al. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 17682-17690, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.815, + 0.476, + 0.905 + ], + "angle": 0, + "content": "Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., Van Den Driessche, G. B., Lespiau, J.-B., Damoc, B., Clark, A., et al. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, pp. 2206-2240. PMLR, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.087, + 0.416, + 0.477, + 0.905 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.085, + 0.888, + 0.13 + ], + "angle": 0, + "content": "Bottou, L. and Cun, Y. Large scale online learning. Advances in neural information processing systems, 16, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.143, + 0.887, + 0.188 + ], + "angle": 0, + "content": "Bottou, L. and Le Cun, Y. On-line learning for very large data sets. Applied stochastic models in business and industry, 21(2):137-151, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.2, + 0.887, + 0.26 + ], + "angle": 0, + "content": "Boudiaf, M., Mueller, R., Ben Ayed, I., and Bertinetto, L. Parameter-free online test-time adaptation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8344-8353, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.272, + 0.885, + 0.316 + ], + "angle": 0, + "content": "Bulatov, A., Kuratov, Y., and Burtsev, M. Recurrent memory transformer. Advances in Neural Information Processing Systems, 35:11079-11091, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.328, + 0.887, + 0.374 + ], + "angle": 0, + "content": "Chen, Z., Deng, Y., Yuan, H., Ji, K., and Gu, Q. Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.386, + 0.887, + 0.46 + ], + "angle": 0, + "content": "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.473, + 0.887, + 0.533 + ], + "angle": 0, + "content": "Feng, T., Han, P., Lin, G., Liu, G., and You, J. 
Thought-retriever: Don't just retrieve raw data, retrieve thoughts, 2024. URL https://openreview.net/forum?id=SkDNQbMQba." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.545, + 0.887, + 0.605 + ], + "angle": 0, + "content": "Feng, Y., Li, F., Song, Z., Zheng, B., and Koehn, P. Learn to remember: Transformer with recurrent memory for document-level machine translation. arXiv preprint arXiv:2205.01546, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.617, + 0.887, + 0.678 + ], + "angle": 0, + "content": "Golovneva, O., O'Brien, S., Pasunuru, R., Wang, T., Zettlemoyer, L., Fazel-Zarandi, M., and Celikyilmaz, A. Pathfinder: Guided search over multi-step reasoning paths. arXiv preprint arXiv:2312.05180, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.689, + 0.887, + 0.749 + ], + "angle": 0, + "content": "Gou, Z., Shao, Z., Gong, Y., Shen, Y., Yang, Y., Duan, N., and Chen, W. Critic: Large language models can self-correct with tool-interactive critiquing. arXiv preprint arXiv:2305.11738, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.762, + 0.885, + 0.792 + ], + "angle": 0, + "content": "Graves, A. Generating sequences with recurrent neural networks. arXiv preprint arXiv:1308.0850, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.804, + 0.885, + 0.834 + ], + "angle": 0, + "content": "Graves, A., Wayne, G., and Danihelka, I. Neural Turing machines. arXiv preprint arXiv:1410.5401, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.846, + 0.887, + 0.906 + ], + "angle": 0, + "content": "Gururangan, S., Marasovic, A., Swayamdipta, S., Lo, K., Beltagy, I., Downey, D., and Smith, N. A. Don't stop pretraining: Adapt language models to domains and tasks. arXiv preprint arXiv:2004.10964, 2020." 
+ }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.888, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.072 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.145 + ], + "angle": 0, + "content": "Guu, K., Lee, K., Tung, Z., Pasupat, P., and Chang, M. Retrieval augmented language model pre-training. In International conference on machine learning, pp. 3929-3938. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.155, + 0.476, + 0.215 + ], + "angle": 0, + "content": "He, Z., Karlinsky, L., Kim, D., McAuley, J., Krotov, D., and Feris, R. Camelot: Towards large language models with training-free consolidated associative memory. arXiv preprint arXiv:2402.13449, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.224, + 0.476, + 0.269 + ], + "angle": 0, + "content": "Joulin, A. and Mikolov, T. Inferring algorithmic patterns with stack-augmented recurrent nets. Advances in neural information processing systems, 28, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.278, + 0.476, + 0.323 + ], + "angle": 0, + "content": "Karpicke, J. D. and Blunt, J. R. Retrieval practice produces more learning than elaborative studying with concept mapping. Science, 331(6018):772-775, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.332, + 0.476, + 0.376 + ], + "angle": 0, + "content": "Karpicke, J. D. and Roediger III, H. L. The critical importance of retrieval for learning. science, 319(5865): 966-968, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.386, + 0.476, + 0.446 + ], + "angle": 0, + "content": "Karpukhin, V., Oguz, B., Min, S., Lewis, P. S., Wu, L., Edunov, S., Chen, D., and Yih, W.-t. 
Dense passage retrieval for open-domain question answering. In EMNLP (1), pp. 6769-6781, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.455, + 0.476, + 0.53 + ], + "angle": 0, + "content": "Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., and Lewis, M. Generalization through memorization: Nearest neighbor language models. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=Hk1BjCEKvH." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.539, + 0.476, + 0.599 + ], + "angle": 0, + "content": "Kojima, T., Gu, S. S., Reid, M., Matsuo, Y., and Iwasawa, Y. Large language models are zero-shot reasoners. Advances in neural information processing systems, 35: 22199-22213, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.608, + 0.476, + 0.653 + ], + "angle": 0, + "content": "Krause, B., Kahembwe, E., Murray, I., and Renals, S. Dynamic evaluation of transformer language models. arXiv preprint arXiv:1904.08378, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.662, + 0.476, + 0.736 + ], + "angle": 0, + "content": "Lazaridou, A., Gribovskaya, E., Stokowiec, W. J., and Grigorev, N. Internet-augmented language models through few-shot prompting for open-domain question answering, 2023. URL https://openreview.net/forum?id=hFCUPkSSRE." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.746, + 0.476, + 0.821 + ], + "angle": 0, + "content": "Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., Kuttler, H., Lewis, M., Yih, W.-t., Rocktaschel, T., et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in neural information processing systems, 33:9459-9474, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.83, + 0.476, + 0.905 + ], + "angle": 0, + "content": "Liu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost in the middle: How language models use long contexts. 
Transactions of the Association for Computational Linguistics, 12:157-173, 2024a." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.905 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.085, + 0.886, + 0.13 + ], + "angle": 0, + "content": "Liu, X., Dong, P., Hu, X., and Chu, X. Longgenbench: Long-context generation benchmark. arXiv preprint arXiv:2410.04199, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.142, + 0.887, + 0.203 + ], + "angle": 0, + "content": "Liu, Y., Kothari, P., Van Delft, B., Bellot-Gurlet, B., Mordan, T., and Alahi, A. Ttt++: When does self-supervised test-time training fail or thrive? Advances in Neural Information Processing Systems, 34:21808-21820, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.214, + 0.887, + 0.244 + ], + "angle": 0, + "content": "Long, J. Large language model guided tree-of-thought. arXiv preprint arXiv:2305.08291, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.256, + 0.887, + 0.301 + ], + "angle": 0, + "content": "Lopez-Paz, D. and Ranzato, M. Gradient episodic memory for continual learning. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.312, + 0.887, + 0.387 + ], + "angle": 0, + "content": "Lu, P., Peng, B., Cheng, H., Galley, M., Chang, K.-W., Wu, Y. N., Zhu, S.-C., and Gao, J. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36: 43447-43478, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.399, + 0.887, + 0.474 + ], + "angle": 0, + "content": "Madaan, A., Tandon, N., Clark, P., and Yang, Y. Memory-assisted prompt editing to improve gpt-3 after deployment. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 2833–2861, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.486, + 0.887, + 0.561 + ], + "angle": 0, + "content": "Madaan, A., Tandon, N., Gupta, P., Hallinan, S., Gao, L., Wegreffe, S., Alon, U., Dziri, N., Prabhumoye, S., Yang, Y., et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.573, + 0.887, + 0.634 + ], + "angle": 0, + "content": "McCloskey, M. and Cohen, N. J. Catastrophic interference in connectionist networks: The sequential learning problem. In Psychology of learning and motivation, volume 24, pp. 109-165. Elsevier, 1989." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.645, + 0.887, + 0.704 + ], + "angle": 0, + "content": "Mikolov, T., Karafiát, M., Burget, L., Cernocký, J., and Khudanpur, S. Recurrent neural network based language model. In *Interspeech*, volume 2, pp. 1045–1048. Makuhari, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.717, + 0.887, + 0.762 + ], + "angle": 0, + "content": "Munkhdalai, T., Sordoni, A., Wang, T., and Trischler, A. Metalearned neural memory. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.773, + 0.887, + 0.834 + ], + "angle": 0, + "content": "Niu, S., Wu, J., Zhang, Y., Chen, Y., Zheng, S., Zhao, P., and Tan, M. Efficient test-time model adaptation without forgetting. In International conference on machine learning, pp. 16888-16905. PMLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.845, + 0.887, + 0.905 + ], + "angle": 0, + "content": "Qin, Y., Liang, S., Ye, Y., Zhu, K., Yan, L., Lu, Y., Lin, Y., Cong, X., Tang, X., Qian, B., et al. Toollm: Facilitating large language models to master 16000+ real-world apis. arXiv preprint arXiv:2307.16789, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.499, + 0.085, + 0.887, + 0.905 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.495, + 0.935 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.072 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.479, + 0.162 + ], + "angle": 0, + "content": "Rannen-Triki, A., Bornschein, J., Pascanu, R., Hutter, M., György, A., Galashov, A., Teh, Y. W., and Titsias, M. K. Revisiting dynamic evaluation: Online adaptation for large language models. arXiv preprint arXiv:2403.01518, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.169, + 0.479, + 0.248 + ], + "angle": 0, + "content": "Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J., and Bowman, S. R. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=Ti67584b98." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.256, + 0.478, + 0.303 + ], + "angle": 0, + "content": "Roediger, H. L. and Butler, A. C. The critical role of retrieval practice in long-term retention. Trends in cognitive sciences, 15(1):20-27, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.311, + 0.477, + 0.389 + ], + "angle": 0, + "content": "Schick, T., Dwivedi-Yu, J., Dessi, R., Raileanu, R., Lomeli, M., Hambro, E., Zettlemoyer, L., Cancedda, N., and Scialom, T. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36:68539-68551, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.397, + 0.477, + 0.475 + ], + "angle": 0, + "content": "Shen, Y., Song, K., Tan, X., Li, D., Lu, W., and Zhuang, Y. HuggingGPT: Solving AI tasks with chatGPT and its friends in hugging face. 
In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=yHdTscY6Ci." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.482, + 0.477, + 0.559 + ], + "angle": 0, + "content": "Shi, F., Fried, D., Ghazvininejad, M., Zettlemoyer, L., and Wang, S. I. Natural language to code translation with execution. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 3533-3546, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.568, + 0.477, + 0.661 + ], + "angle": 0, + "content": "Shi, F., Suzgun, M., Freitag, M., Wang, X., Srivats, S., Vosoughi, S., Chung, H. W., Tay, Y., Ruder, S., Zhou, D., Das, D., and Wei, J. Language models are multilingual chain-of-thought reasoners. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=fR3wGck-IXp." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.669, + 0.479, + 0.837 + ], + "angle": 0, + "content": "Shi, W., Min, S., Yasunaga, M., Seo, M., James, R., Lewis, M., Zettlemoyer, L., and Yih, W.-t. REPLUG: Retrievalaugmented black-box language models. In Duh, K., Gomez, H., and Bethard, S. (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 8371-8384, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.nacl-long.463. URL https://aclanthology.org/2024.nacl-long.463/." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.845, + 0.478, + 0.907 + ], + "angle": 0, + "content": "Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K., and Yao, S. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36:8634-8652, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.479, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.085, + 0.888, + 0.148 + ], + "angle": 0, + "content": "Sun, Y., Wang, X., Liu, Z., Miller, J., Efros, A., and Hardt, M. Test-time training with self-supervision for generalization under distribution shifts. In International conference on machine learning, pp. 9229-9248. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.158, + 0.888, + 0.22 + ], + "angle": 0, + "content": "Sun, Y., Li, X., Dalal, K., Xu, J., Vikram, A., Zhang, G., Dubois, Y., Chen, X., Wang, X., Koyejo, S., et al. Learning to (learn at test time): Rnns with expressive hidden states. arXiv preprint arXiv:2407.04620, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.231, + 0.888, + 0.293 + ], + "angle": 0, + "content": "Surís, D., Menon, S., and Vondrick, C. Vipergpt: Visual inference via python execution for reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 11888-11898, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.304, + 0.888, + 0.351 + ], + "angle": 0, + "content": "Suzgun, M. and Kalai, A. T. Meta-prompting: Enhancing language models with task-agnostic scaffolding. arXiv preprint arXiv:2401.12954, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.362, + 0.888, + 0.422 + ], + "angle": 0, + "content": "Suzgun, M., Gehrmann, S., Belinkov, Y., and Shieber, S. M. Memory-augmented recurrent neural networks can learn generalized dyck languages. arXiv preprint arXiv:1911.03329, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.434, + 0.888, + 0.511 + ], + "angle": 0, + "content": "Suzgun, M., Melas-Kyriazi, L., and Jurafsky, D. Follow the wisdom of the crowd: Effective text generation via minimum bayes risk decoding. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 4265-4293, 2023a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.523, + 0.888, + 0.613 + ], + "angle": 0, + "content": "Suzgun, M., Scales, N., Scharli, N., Gehrmann, S., Tay, Y., Chung, H. W., Chowdhery, A., Le, Q., Chi, E., Zhou, D., et al. Challenging big-bench tasks and whether chain-of-thought can solve them. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 13003-13051, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.626, + 0.888, + 0.703 + ], + "angle": 0, + "content": "Suzgun, M., Shieber, S. M., and Jurafsky, D. string2string: A modern python library for string-to-string algorithms. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pp. 278-285, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.714, + 0.888, + 0.79 + ], + "angle": 0, + "content": "Syed, N. A., Liu, H., and Sung, K. K. Handling concept drifts in incremental learning with support vector machines. In Proceedings of the fifth ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 317-321, 1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.802, + 0.888, + 0.834 + ], + "angle": 0, + "content": "Thrun, S. and Mitchell, T. M. Lifelong robot learning. Robotics and autonomous systems, 15(1-2):25-46, 1995." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.845, + 0.888, + 0.907 + ], + "angle": 0, + "content": "Vu, T., Iyyer, M., Wang, X., Constant, N., Wei, J., Wei, J., Tar, C., Sung, Y.-H., Zhou, D., Le, Q., et al. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.888, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.072 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.13 + ], + "angle": 0, + "content": "Wang, D., Shelhamer, E., Liu, S., Olshausen, B., and Darrell, T. Tent: Fully test-time adaptation by entropy minimization. arXiv preprint arXiv:2006.10726, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.14, + 0.476, + 0.231 + ], + "angle": 0, + "content": "Wang, X., Wei, J., Schuurmans, D., Le, Q. V., Chi, E. H., Narang, S., Chowdhery, A., and Zhou, D. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1PL1NIMMrw." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.24, + 0.476, + 0.3 + ], + "angle": 0, + "content": "Wang, Y., Gao, Y., Chen, X., Jiang, H., Li, S., Yang, J., Yin, Q., Li, Z., Li, X., Yin, B., et al. Memoryllm: Towards self-updatable large language models. arXiv preprint arXiv:2402.04624, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.31, + 0.476, + 0.431 + ], + "angle": 0, + "content": "Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., Li, T., Ku, M., Wang, K., Zhuang, A., Fan, R., Yue, X., and Chen, W. MMLU-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024b. URL https://openreview.net/forum?id=y10DM6R2r3." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.44, + 0.476, + 0.515 + ], + "angle": 0, + "content": "Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.526, + 0.476, + 0.556 + ], + "angle": 0, + "content": "Weston, J., Chopra, S., and Bordes, A. Memory networks. arXiv preprint arXiv:1410.3916, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.566, + 0.476, + 0.641 + ], + "angle": 0, + "content": "Yang, L., Yu, Z., Zhang, T., Cao, S., Xu, M., Zhang, W., Gonzalez, J. E., and Cui, B. Buffer of thoughts: Thought-augmented reasoning with large language models. Advances in Neural Information Processing Systems, 37: 113519-113544, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.651, + 0.476, + 0.696 + ], + "angle": 0, + "content": "Yao, S., Yu, D., Zhao, J., Shafran, I., Griffiths, T. L., Cao, Y., and Narasimhan, K. Tree of Thoughts: Deliberate problem solving with large language models, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.706, + 0.476, + 0.765 + ], + "angle": 0, + "content": "Yuksekgonul, M., Bianchi, F., Boen, J., Liu, S., Lu, P., Huang, Z., Guestrin, C., and Zou, J. Optimizing generative ai by backpropagating language model feedback. Nature, 639:609-616, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.775, + 0.476, + 0.821 + ], + "angle": 0, + "content": "Zelikman, E., Wu, Y., Mu, J., and Goodman, N. Star: Bootstrapping reasoning with reasoning. Advances in Neural Information Processing Systems, 35:15476-15488, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.83, + 0.476, + 0.906 + ], + "angle": 0, + "content": "Zhang, K., Kang, Y., Zhao, F., and Liu, X. 
LLM-based medical assistant personalization with short- and long-term memory coordination. In Duh, K., Gomez, H., and Bethard, S. (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.085, + 0.887, + 0.161 + ], + "angle": 0, + "content": "Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 2386-2398, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.naac1-long.132. URL https://aclanthology.org/2024.naac1-long.132/." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.171, + 0.887, + 0.23 + ], + "angle": 0, + "content": "Zhang, M., Levine, S., and Finn, C. Memo: Test time robustness via adaptation and augmentation. Advances in neural information processing systems, 35:38629-38642, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.241, + 0.887, + 0.316 + ], + "angle": 0, + "content": "Zhang, T., Patil, S. G., Jain, N., Shen, S., Zaharia, M., Stoica, I., and Gonzalez, J. E. RAFT: Adapting language model to domain specific RAG. In First Conference on Language Modeling, 2024b. URL https://openreview.net/forum?id=rzQGHXNReU." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.326, + 0.887, + 0.448 + ], + "angle": 0, + "content": "Zhong, Z., Lei, T., and Chen, D. Training language models with memory augmentation. In Goldberg, Y., Kozareva, Z., and Zhang, Y. (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 5657-5673, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.382. URL https://aclanthology.org/2022.emnlp-main.382/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.457, + 0.887, + 0.532 + ], + "angle": 0, + "content": "Zhou, D., Scharli, N., Hou, L., Wei, J., Scales, N., Wang, X., Schuurmans, D., Cui, C., Bousquet, O., Le, Q., et al. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625, 2022." + }, + { + "type": "list", + "bbox": [ + 0.499, + 0.085, + 0.887, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.924, + 0.495, + 0.935 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.084, + 0.36, + 0.101 + ], + "angle": 0, + "content": "A. Background & Related Work" + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.11, + 0.368, + 0.126 + ], + "angle": 0, + "content": "A.1. Test-time learning (online learning)" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.134, + 0.475, + 0.449 + ], + "angle": 0, + "content": "Test-time learning—also referred to as online or incremental learning (adaptation)—encompasses a family of methods in which a stochastic model updates its predictions by incorporating information seen during inference, without undergoing conventional, full-scale offline finetuning. Early versions of test-time adaptation focused on local or transductive learning, where a model re-fit or re-weighted its parameters with each new test instance or batch (McCloskey & Cohen, 1989; Thrun & Mitchell, 1995; Amari, 1998; Syed et al., 1999; Bottou & Cun, 2003; Bottou & Le Cun, 2005, inter alia). 
In computer vision, for example, methods like test-time training have been shown to mitigate domain shifts by optimizing a self-supervised loss on incoming data (Wang et al., 2020; Sun et al., 2020; Liu et al., 2021; Boudiaf et al., 2022; Niu et al., 2022; Zhang et al., 2022; Sun et al., 2024). In the context of natural-language generation, test-time adaptation has appeared under terms such as \"dynamic evaluation\" (Mikolov et al., 2010; Graves, 2013; Krause et al., 2019; Rannen-Triki et al., 2024), in which a language model is updated with gradient steps on the test-time data itself." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.459, + 0.475, + 0.669 + ], + "angle": 0, + "content": "However, directly updating language model weights at test time can be computationally expensive and requires the capacity to modify parameters. For large-scale, black-box APIs (e.g., GPT-3 or Claude), one often lacks the ability to perform parameter updates easily, thereby making such an approach difficult, if not completely infeasible (Shi et al., 2024). To address this, a growing body of work has explored parameter-free adaptation, whereby one structurally modifies immediate model inputs (e.g., prompting) or draws from external memory to \"update\" the model's effective reasoning. Our approach aligns with this direction by allowing an LM to iteratively record solutions, explanations, or heuristics in an external memory component over successive interactions, avoiding weight updates entirely." 
+ }, + { + "type": "text", + "bbox": [ + 0.089, + 0.678, + 0.475, + 0.903 + ], + "angle": 0, + "content": "In the broader test-time learning literature, reflexive, compositional, and iterative refinement approaches like Reflexion (Shinn et al., 2023), Self-Refine (Madaan et al., 2023), (Self-)Critic (Gou et al., 2023), Chameleon (Lu et al., 2023), Meta-Prompting (Suzgun & Kalai, 2024), and Self-RAG (Asai et al., 2023) inter alia, use feedback loops or verification mechanisms to correct mistakes in solutions. TextGrad (Yuksekgonul et al., 2025) similarly draws on the notion of \"textual gradients\" as an alternative to parameter-based gradients and provides a pathway for improvement based on the content of mistakes. Our proposed DC framework differs by focusing explicitly on storing generalizable heuristics, solutions, or meta-level insights that can be repeatedly retrieved and applied across tasks, not just to correct a single solution. Furthermore, DC does not require a" + }, + { + "type": "text", + "bbox": [ + 0.502, + 0.086, + 0.885, + 0.13 + ], + "angle": 0, + "content": "new training loop for each batch or scenario; instead, the memory itself is updated to reflect newly found solutions, errors, or strategies without touching the model weights." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.148, + 0.765, + 0.162 + ], + "angle": 0, + "content": "A.2. 
Test-time compute and reasoning" + }, + { + "type": "text", + "bbox": [ + 0.502, + 0.171, + 0.886, + 0.412 + ], + "angle": 0, + "content": "It is now widely known and accepted that contemporary LLMs such as GPT-4 can exhibit substantial improvements in reasoning and generation capability when additional compute is devoted to inference-time strategies (e.g., chain-of-thought prompting (Wei et al., 2022; Kojima et al., 2022; Zhou et al., 2022), tree-of-thought expansions (Yao et al., 2023; Long, 2023), minimum Bayes risk decoding (Suzgun et al., 2023a; Shi et al., 2022; Golovneva et al., 2023), majority-vote sampling (Wang et al., 2023)). Prompting methods such as Tree-of-Thought (Yao et al., 2023), Graph-of-Thought (Besta et al., 2024), and other non-linear compositional reasoning paradigms systematically enlarge the inference-time search space. They allow models to explore various reasoning paths and exploit consensus or iterative corrections to arrive at more accurate and reliable conclusions (Wei et al., 2022; Wang et al., 2023)." + }, + { + "type": "text", + "bbox": [ + 0.502, + 0.421, + 0.886, + 0.629 + ], + "angle": 0, + "content": "However, these expansions come at the cost of increased computational overhead per test instance (Yao et al., 2023). They are, however, typically ephemeral: once a solution is generated, subsequent tasks or input samples do not generally benefit from the heavy compute spent earlier, unless the user manually engineers advanced prompt-sharing or in-context demonstration strategies. Cf. (Zelikman et al., 2022). Our work, on the other hand, aims to reduce repeated overhead across multiple test instances of a similar domain by building a memory that persists from one query to the next. 
This memory not only reduces repetitive mistakes, but also consolidates and codifies robust solution strategies—effectively amortizing or \"sharing\" the cost of initial reflection across future tasks.[14]" + }, + { + "type": "text", + "bbox": [ + 0.502, + 0.64, + 0.886, + 0.714 + ], + "angle": 0, + "content": "Another related thread involves tool usage or code execution (Schick et al., 2023; Lu et al., 2023; Shen et al., 2023; Qin et al., 2023; Surís et al., 2023; Suzgun & Kalai, 2024). These studies have explored how LLMs can call external Python interpreters, symbolic solvers, or other specialized" + }, + { + "type": "page_footnote", + "bbox": [ + 0.502, + 0.724, + 0.886, + 0.901 + ], + "angle": 0, + "content": "14Some lines of work—such as majority voting or sampling-based self-consistency—combine multiple inference passes for a single question but still lack a persistent knowledge base that spans different queries. DC differs in that we treat consecutive tasks in a sequence as a chance to refine a persistent, external store of learned lessons. The memory curation step selectively compiles relevant solutions, heuristics, expansions, or code blocks into a form that can be reused for upcoming queries. Thus, while the compute for the first few tasks may be higher, future tasks become simpler because the system can consult and adapt previously curated knowledge. This approach echoes the underlying motivation of test-time training—performing ongoing improvement at inference—but capitalizes on a cheap, external memory update in lieu of repeated or expensive parameter updates." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.924, + 0.495, + 0.935 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.477, + 0.222 + ], + "angle": 0, + "content": "services and APIs to offload complex computations. Our empirical findings too illustrate that once an LLM under DC recognizes a systematic way (e.g., Python-based brute force algorithm) to handle a certain class of problems (like arithmetic puzzles), it can store that approach in memory and repeatedly retrieve it. Thus, DC not only invests extra compute in a single session but spreads that computational benefit across multiple interactions, effectively learning to use tools more consistently and reliably over time." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.238, + 0.449, + 0.254 + ], + "angle": 0, + "content": "A.3. Memory-augmented generation and reasoning" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.261, + 0.478, + 0.594 + ], + "angle": 0, + "content": "Augmenting language models with external memory has seen renewed interest in recent years (Munkhdalai et al., 2019; Guu et al., 2020; Khandelwal et al., 2020; Bulatov et al., 2022; Borgeaud et al., 2022; Zhong et al., 2022; Feng et al., 2022; He et al., 2024; Wang et al., 2024a)—see also (Graves et al., 2014; Weston et al., 2014; Joulin & Mikolov, 2015; Suzgun et al., 2019) for early studies. Modern retrieval-augmented LLM approaches generally consult an external corpus of documents (i.e., a knowledge base) to improve factuality and reduce hallucination (Lewis et al., 2020; Lazaridou et al., 2023; Vu et al., 2023; Zhang et al., 2024b), but the retrieval corpus is almost always fixed prior to inference and does not evolve over time. 
These methods have been especially effective for open-domain question answering (Lewis et al., 2020; Guu et al., 2020; Karpukhin et al., 2020), where the model's own parameters may not hold all relevant knowledge. In practice, retrieval augmentation typically involves selecting and concatenating top-\\(k\\) passages from a knowledge-base—while useful for factual queries, the approach, however, does not inherently solve iterative improvement or learning from mistakes in the sense of building upon prior solutions at inference time." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.601, + 0.478, + 0.889 + ], + "angle": 0, + "content": "Another line of research more closely aligns with our vision by storing not just reference knowledge but also the reasoning processes and solution strategies of language models. Several recent works have explored this direction. Thought-R retriever (Feng et al., 2024) logs the model's chain-of-thought from past queries and uses them for new, analogous queries. Buffer-of-Thoughts (BoT; Yang et al., 2025) takes a slightly different approach by distilling high-level \"thought templates\" from problem-solving processes, though it relies on predefined templates that seem to be tailored towards specific task types that were considered in their experiments. Madaan et al. (2022) have demonstrated that deployed models like GPT-3 can be improved through memory mechanisms that capture user feedback on errors, preventing similar mistakes in future interactions. Zhang et al. (2024a) have proposed a dual memory architecture combining long-term and short-term storage for medical applications, though their approach requires fine-tuning to incorporate new knowledge." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.889, + 0.266 + ], + "angle": 0, + "content": "While these works reveal the many strategies for harnessing memory or feedback, DC emphasizes selectively storing the most relevant insights and heuristics. 
DC aims to avoid naive accumulation of full raw transcripts and ephemeral chain-of-thought expansions that can lead to memory bloat. Moreover, unlike methods that assume the model can be retrained or finetuned to incorporate memory items, we remain fully external and training-free; this aligns with \"plug-and-play\" usage principle, in which an off-the-shelf model is augmented by an external memory that it reads from and writes to, but does not require any gradient-based adaptation." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.695, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.084, + 0.367, + 0.101 + ], + "angle": 0, + "content": "B. Additional Figures and Tables" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.11, + 0.553, + 0.126 + ], + "angle": 0, + "content": "B.1. Performance Comparison of Baseline and DC-RS Approaches" + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.141, + 0.703, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.418, + 0.886, + 0.445 + ], + "angle": 0, + "content": "Figure 8: Overall performance of Claude 3.5 Sonnet under the baseline prompting approach with minimal instructions (Baseline) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS)." + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.452, + 0.703, + 0.716 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.728, + 0.886, + 0.755 + ], + "angle": 0, + "content": "Figure 9: Overall performance of GPT-40 under the baseline prompting approach with minimal instructions (Baseline) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.085, + 0.383, + 0.101 + ], + "angle": 0, + "content": "B.2. Clustering of Errors and Corrections" + }, + { + "type": "image_caption", + "bbox": [ + 0.271, + 0.123, + 0.752, + 0.138 + ], + "angle": 0, + "content": "tSNE Visualization of the Question Embeddings in GPQA Diamond" + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.138, + 0.871, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.531, + 0.888, + 0.571 + ], + "angle": 0, + "content": "Figure 10: t-SNE visualization of the embeddings of the raw questions in GPQA-Diamond. Note that correct and incorrect answers often cluster in latent embedding space. DC can help transfer learned strategies within these clusters, but without careful curation, erroneous heuristics may also spread, thus requiring careful memory refinement and verification of solution strategies." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.085, + 0.525, + 0.101 + ], + "angle": 0, + "content": "B.3. 
Evolution of Memory Content under Dynamic Cheatsheet" + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.121, + 0.871, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.491, + 0.89, + 0.531 + ], + "angle": 0, + "content": "Figure 11: This figure illustrates how memory content of GPT-4o evolves over time in Game of 24, quantified using a longest-common-subsequence (LCS)-similarity metric (Suzgun et al., 2024) between consecutive states (measured at the word level). While both DC-Cu and DC-RS show high stability after the first few iterations, DC-Cu experiences slightly greater fluctuations in the second half of inference." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.086, + 0.476, + 0.101 + ], + "angle": 0, + "content": "B.4. Solution Generator and Memory Curator Prompts" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.109, + 0.478, + 0.125 + ], + "angle": 0, + "content": "B.4.1. Prompt Used by the Generator Model in Baseline" + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.14, + 0.827, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.526, + 0.888, + 0.568 + ], + "angle": 0, + "content": "Figure 12: Prompt used in the baseline (BL) approach, where the model receives minimal instructions. The prompt simply asks the model to answer the given question without any structured guidance, additional reasoning steps, or tool-use encouragement. This setup represents a traditional one-off inference method, reflecting how LLMs typically operate by default." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.086, + 0.623, + 0.101 + ], + "angle": 0, + "content": "B.4.2. Prompt Used by the Generator Model in DR, FH, and DC Approaches" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.123, + 0.419, + 0.133 + ], + "angle": 0, + "content": "GENERATOR (PROBLEM SOLVER)" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.136, + 0.709, + 0.146 + ], + "angle": 0, + "content": "Instruction: You are an expert problem-solving assistant tasked with analyzing and solving various questions using" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.146, + 0.607, + 0.155 + ], + "angle": 0, + "content": "a combination of your expertise and provided reference materials. Each task will include:" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.155, + 0.422, + 0.164 + ], + "angle": 0, + "content": "1. A specific question or problem to solve" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.164, + 0.625, + 0.173 + ], + "angle": 0, + "content": "2. 
A cheatsheet containing relevant strategies, patterns, and examples from similar problems" + }, + { + "type": "list", + "bbox": [ + 0.261, + 0.146, + 0.625, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.188, + 0.38, + 0.197 + ], + "angle": 0, + "content": "##1.ANALYSIS&STRATEGY" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.201, + 0.53, + 0.211 + ], + "angle": 0, + "content": "- Carefully analyze both the question and cheatsheet before starting" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.211, + 0.631, + 0.219 + ], + "angle": 0, + "content": "- Search for and identify any applicable patterns, strategies, or examples within the cheatsheet" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.219, + 0.504, + 0.228 + ], + "angle": 0, + "content": "- Create a structured approach to solving the problem at hand" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.228, + 0.555, + 0.238 + ], + "angle": 0, + "content": "- Review and document any limitations in the provided reference materials" + }, + { + "type": "list", + "bbox": [ + 0.262, + 0.201, + 0.631, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.253, + 0.395, + 0.262 + ], + "angle": 0, + "content": "## 2. 
SOLUTION DEVELOPMENT" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.265, + 0.578, + 0.274 + ], + "angle": 0, + "content": "- Present your solution using clear, logical steps that others can follow and review" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.274, + 0.568, + 0.284 + ], + "angle": 0, + "content": "- Explain your reasoning and methodology before presenting final conclusions" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.284, + 0.493, + 0.293 + ], + "angle": 0, + "content": "- Provide detailed explanations for each step of the process" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.293, + 0.515, + 0.302 + ], + "angle": 0, + "content": "- Check and verify all assumptions and intermediate calculations" + }, + { + "type": "list", + "bbox": [ + 0.262, + 0.265, + 0.578, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.317, + 0.381, + 0.326 + ], + "angle": 0, + "content": "##3.PROGRAMMINGTASKS" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.329, + 0.362, + 0.338 + ], + "angle": 0, + "content": "When coding is required:" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.339, + 0.402, + 0.348 + ], + "angle": 0, + "content": "- Write clean, efficient Python code" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.348, + 0.669, + 0.357 + ], + "angle": 0, + "content": "- Follow the strict code formatting and execution protocol (always use the Python code formatting block;" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.357, + 0.669, + 0.366 + ], + "angle": 0, + "content": "furthermore, after the code block, always explicitly request execution by appending: \"EXECUTE CODE!\":" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.367, + 0.308, + 0.375 + ], + "angle": 0, + "content": "```\n``python" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.375, + 0.333, + 0.384 + ], + "angle": 0, + "content": "Your code here" + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.395, + 0.335, + 
0.403 + ], + "angle": 0, + "content": "EXECUTE CODE!" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.408, + 0.613, + 0.417 + ], + "angle": 0, + "content": "- All required imports and dependencies should be clearly declared at the top of your code" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.418, + 0.555, + 0.427 + ], + "angle": 0, + "content": "- Include clear inline comments to explain any complex programming logic" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.427, + 0.468, + 0.436 + ], + "angle": 0, + "content": "- Perform result validation after executing your code" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.436, + 0.536, + 0.445 + ], + "angle": 0, + "content": "- Apply optimization techniques from the cheatsheet when applicable" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.445, + 0.686, + 0.455 + ], + "angle": 0, + "content": "- The code should be completely self-contained without external file dependencies—it should be ready to be" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.455, + 0.343, + 0.464 + ], + "angle": 0, + "content": "executed right away" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.464, + 0.585, + 0.474 + ], + "angle": 0, + "content": "- Do not include any placeholders, system-specific paths, or hard-coded local paths" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.474, + 0.483, + 0.483 + ], + "angle": 0, + "content": "- Feel free to use standard and widely-used pip packages" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.483, + 0.505, + 0.492 + ], + "angle": 0, + "content": "- Opt for alternative methods if errors persist during execution" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.492, + 0.573, + 0.502 + ], + "angle": 0, + "content": "- Exclude local paths and engine-specific settings (e.g., avoid configurations like" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.502, + 0.474, + 0.511 + ], + "angle": 0, + "content": 
"chess.engineSimpleEngine.popen_uci(\"/usr/bin/stockfish\")" + }, + { + "type": "list", + "bbox": [ + 0.262, + 0.408, + 0.686, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.526, + 0.388, + 0.534 + ], + "angle": 0, + "content": "## 4. FINAL ANSWER FORMAT" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.538, + 0.492, + 0.547 + ], + "angle": 0, + "content": "ALWAYS present your final answer in the following format:" + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.549, + 0.333, + 0.557 + ], + "angle": 0, + "content": "FINAL ANSWER:" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.56, + 0.303, + 0.568 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.569, + 0.321, + 0.577 + ], + "angle": 0, + "content": "(final answer)" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.578, + 0.306, + 0.586 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.588, + 0.59, + 0.598 + ], + "angle": 0, + "content": "N.B. Make sure that the final answer is properly wrapped inside the block." 
+ }, + { + "type": "text", + "bbox": [ + 0.262, + 0.601, + 0.542, + 0.611 + ], + "angle": 0, + "content": "* For multiple-choice questions: Only provide the letter choice (e.g., (A))" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.611, + 0.515, + 0.621 + ], + "angle": 0, + "content": "* For numerical answers: Only provide the final number (e.g., 42)" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.621, + 0.64, + 0.63 + ], + "angle": 0, + "content": "* For other types of answers, including free-response answers: Provide the complete final answer" + }, + { + "type": "list", + "bbox": [ + 0.262, + 0.601, + 0.64, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.634, + 0.303, + 0.643 + ], + "angle": 0, + "content": "Example:" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.643, + 0.384, + 0.652 + ], + "angle": 0, + "content": "Q: What is the meaning of life?" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.652, + 0.288, + 0.66 + ], + "angle": 0, + "content": "A: [..]" + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.661, + 0.333, + 0.669 + ], + "angle": 0, + "content": "FINAL ANSWER:" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.672, + 0.303, + 0.679 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.681, + 0.275, + 0.688 + ], + "angle": 0, + "content": "42" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.689, + 0.307, + 0.697 + ], + "angle": 0, + "content": "" + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.713, + 0.323, + 0.722 + ], + "angle": 0, + "content": "CHEATSHEET:" + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.731, + 0.333, + 0.74 + ], + "angle": 0, + "content": "[CHEATSHEET]" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.742, + 0.303, + 0.749 + ], + "angle": 0, + "content": "\"" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.771, + 0.442, + 0.781 + ], + "angle": 0, + "content": "Now it is time to solve the following 
question." + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.783, + 0.337, + 0.792 + ], + "angle": 0, + "content": "CURRENTINPUT:" + }, + { + "type": "title", + "bbox": [ + 0.262, + 0.802, + 0.322, + 0.812 + ], + "angle": 0, + "content": "[QUESTION]" + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.834, + 0.888, + 0.911 + ], + "angle": 0, + "content": "Figure 13: Generator prompt used in the DR, FH, and DC approaches, where the model receives structured high-level instructions on solution development, strategy selection, and tool usage. This prompt explicitly encourages Python code generation and execution for computational tasks. Notably, this same structured prompt is used in all non-BL methods, including DC-Ø, DR, FH, DC-Cu, and DC-RS. We also remark that during the initial phases of our experiments, we used \"cheatsheet\" and \"memory\" interchangeably to describe stored problem-solving content. However, to maintain consistency, we formally define \\( M_{i} \\) as memory throughout this paper. Since this was purely a semantic choice, we did not find it necessary to rerun our experiments to reflect this terminology shift." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.924, + 0.497, + 0.935 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.085, + 0.553, + 0.1 + ], + "angle": 0, + "content": "B.4.3. 
Prompt Used by the Memory Curation Model under DC-RS" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.123, + 0.347, + 0.132 + ], + "angle": 0, + "content": "CHEATSHEET CURATOR" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.135, + 0.327, + 0.144 + ], + "angle": 0, + "content": "Purpose and Goals" + }, + { + "type": "text", + "bbox": [ + 0.237, + 0.144, + 0.727, + 0.17 + ], + "angle": 0, + "content": "You are responsible for maintaining, refining, and optimizing the Dynamic Cheatsheet, which serves as a compact yet evolving repository of problem-solving strategies, reusable code snippets, and meta-reasoning techniques. Your goal is to enhance the model's long-term performance by continuously updating the cheatsheet with high-value insights while filtering out redundant or trivial information." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.171, + 0.689, + 0.18 + ], + "angle": 0, + "content": "- The cheatsheet should include quick, accurate, reliable, and practical solutions to a range of technical and creative challenges." + }, + { + "type": "text", + "bbox": [ + 0.237, + 0.18, + 0.726, + 0.198 + ], + "angle": 0, + "content": "- After seeing each input, you should improve the content of the cheatsheet, synthesizing lessons, insights, tricks, and errors learned from past problems and adapting to new challenges." + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.208, + 0.335, + 0.217 + ], + "angle": 0, + "content": "Core Responsibilities" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.219, + 0.358, + 0.228 + ], + "angle": 0, + "content": "Selective Knowledge Retention:" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.228, + 0.699, + 0.237 + ], + "angle": 0, + "content": "- Preserve only high-value strategies, code blocks, insights, and reusable patterns that significantly contribute to problem-solving." 
+ }, + { + "type": "text", + "bbox": [ + 0.238, + 0.237, + 0.558, + 0.245 + ], + "angle": 0, + "content": "- Discard redundant, trivial, or highly problem-specific details that do not generalize well." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.245, + 0.612, + 0.254 + ], + "angle": 0, + "content": "- Ensure that previously effective solutions remain accessible while incorporating new, superior methods." + }, + { + "type": "list", + "bbox": [ + 0.238, + 0.228, + 0.699, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.256, + 0.39, + 0.264 + ], + "angle": 0, + "content": "Continuous Refinement & Optimization:" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.265, + 0.588, + 0.274 + ], + "angle": 0, + "content": "- Improve existing strategies by incorporating more efficient, elegant, or generalizable techniques." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.274, + 0.532, + 0.282 + ], + "angle": 0, + "content": "- Remove duplicate entries or rephrase unclear explanations for better readability." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.282, + 0.521, + 0.29 + ], + "angle": 0, + "content": "- Introduce new meta-strategies based on recent problem-solving experiences." 
+ }, + { + "type": "list", + "bbox": [ + 0.238, + 0.265, + 0.588, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.293, + 0.338, + 0.301 + ], + "angle": 0, + "content": "Structure & Organization:" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.301, + 0.485, + 0.309 + ], + "angle": 0, + "content": "- Maintain a well-organized cheatsheet with clearly defined sections:" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.309, + 0.42, + 0.318 + ], + "angle": 0, + "content": "- Reusable Code Snippets and Solution Strategies" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.318, + 0.378, + 0.326 + ], + "angle": 0, + "content": "- General Problem-Solving Heuristics" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.326, + 0.388, + 0.334 + ], + "angle": 0, + "content": "- Optimization Techniques & Edge Cases" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.334, + 0.378, + 0.343 + ], + "angle": 0, + "content": "-Specialized Knowledge & Theorems" + }, + { + "type": "list", + "bbox": [ + 0.238, + 0.301, + 0.485, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.343, + 0.584, + 0.352 + ], + "angle": 0, + "content": "- Use tagging (e.g., Q14, Q22) to reference previous problems that contributed to a given strategy." + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.366, + 0.364, + 0.374 + ], + "angle": 0, + "content": "Principles and Best Practices" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.376, + 0.372, + 0.385 + ], + "angle": 0, + "content": "For every new problem encountered:" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.385, + 0.383, + 0.394 + ], + "angle": 0, + "content": "1. Evaluate the Solution's Effectiveness" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.394, + 0.372, + 0.402 + ], + "angle": 0, + "content": "- Was the applied strategy optimal?" 
+ }, + { + "type": "text", + "bbox": [ + 0.238, + 0.402, + 0.498, + 0.411 + ], + "angle": 0, + "content": "- Could the solution be improved, generalized, or made more efficient?" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.411, + 0.555, + 0.419 + ], + "angle": 0, + "content": "- Does the cheatsheet already contain a similar strategy, or should a new one be added?" + }, + { + "type": "list", + "bbox": [ + 0.238, + 0.376, + 0.555, + 0.419 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.421, + 0.422, + 0.43 + ], + "angle": 0, + "content": "2. Curate & Document the Most Valuable Insights" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.43, + 0.655, + 0.439 + ], + "angle": 0, + "content": "- Extract key algorithms, heuristics, and reusable code snippets that would help solve similar problems in the future." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.439, + 0.521, + 0.447 + ], + "angle": 0, + "content": "- Identify patterns, edge cases, and problem-specific insights worth retaining." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.447, + 0.546, + 0.455 + ], + "angle": 0, + "content": "- If a better approach than a previously recorded one is found, replace the old version." + }, + { + "type": "list", + "bbox": [ + 0.238, + 0.43, + 0.655, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.458, + 0.384, + 0.466 + ], + "angle": 0, + "content": "3. Maintain Concise, Actionable Entries" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.466, + 0.468, + 0.474 + ], + "angle": 0, + "content": "- Keep explanations clear, actionable, concise, and to the point." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.474, + 0.474, + 0.482 + ], + "angle": 0, + "content": "- Include only the most effective and widely applicable methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.238, + 0.482, + 0.542, + 0.491 + ], + "angle": 0, + "content": "- Seek to extract useful and general solution strategies and/or Python code snippets." + }, + { + "type": "list", + "bbox": [ + 0.238, + 0.466, + 0.542, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.494, + 0.354, + 0.502 + ], + "angle": 0, + "content": "4. Implement a Usage Counter" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.503, + 0.671, + 0.511 + ], + "angle": 0, + "content": "Each entry must include a usage count: Increase the count every time a strategy is successfully used in problem-solving." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.512, + 0.523, + 0.52 + ], + "angle": 0, + "content": "- Use the count to prioritize frequently used solutions over rarely applied ones." + }, + { + "type": "list", + "bbox": [ + 0.238, + 0.503, + 0.671, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.533, + 0.347, + 0.541 + ], + "angle": 0, + "content": "Memory Update Format" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.542, + 0.42, + 0.55 + ], + "angle": 0, + "content": "Use the following structure for each memory item:" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.551, + 0.253, + 0.558 + ], + "angle": 0, + "content": "··" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.562, + 0.303, + 0.57 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.571, + 0.293, + 0.578 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.578, + 0.622, + 0.587 + ], + "angle": 0, + "content": "[Briefly describe the problem context, purpose, and key aspects of the solution.] 
(Reference: Q1, Q2, Q6, etc.)" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.587, + 0.295, + 0.595 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.596, + 0.283, + 0.604 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.604, + 0.542, + 0.613 + ], + "angle": 0, + "content": "[Provide a well-documented code snippet, worked-out solution, or efficient strategy.]" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.613, + 0.285, + 0.62 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.621, + 0.306, + 0.629 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.629, + 0.509, + 0.638 + ], + "angle": 0, + "content": "** Count: [Number of times this strategy has been used to solve a problem.]" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.64, + 0.303, + 0.648 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.649, + 0.253, + 0.656 + ], + "angle": 0, + "content": "[...]" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.657, + 0.306, + 0.665 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.665, + 0.288, + 0.673 + ], + "angle": 0, + "content": "** Count: [...]" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.676, + 0.254, + 0.685 + ], + "angle": 0, + "content": "[...]" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.688, + 0.303, + 0.696 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.697, + 0.253, + 0.704 + ], + "angle": 0, + "content": "[...]" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.705, + 0.306, + 0.713 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.724, + 0.733, + 0.741 + ], + "angle": 0, + "content": "- Prioritize accuracy, efficiency & generalizability: The cheatsheet should capture insights that apply across multiple problems rather than just 
storing isolated solutions." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.741, + 0.666, + 0.75 + ], + "angle": 0, + "content": "- Ensure clarity & usability: Every update should make the cheatsheet more structured, actionable, and easy to navigate." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.75, + 0.609, + 0.758 + ], + "angle": 0, + "content": "- Maintain a balance: While adding new strategies, ensure that old but effective techniques are not lost." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.758, + 0.727, + 0.775 + ], + "angle": 0, + "content": "- Keep it evolving: The cheatsheet should be a living document that continuously improves over time, enhancing test-time meta-learning capabilities." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.777, + 0.714, + 0.786 + ], + "angle": 0, + "content": "N.B. Keep in mind that once the cheatsheet is updated, any previous content not directly included will be lost and cannot be retrieved." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.786, + 0.729, + 0.794 + ], + "angle": 0, + "content": "Therefore, make sure to explicitly copy any (or all) relevant information from the previous cheatsheet to the new cheatsheet! Furthermore," + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.795, + 0.59, + 0.803 + ], + "angle": 0, + "content": "make sure that all information related to the cheatsheet is wrapped inside the block." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.817, + 0.89, + 0.907 + ], + "angle": 0, + "content": "Figure 14: Prompt used for the memory curator under DC-RS, which is responsible for maintaining an evolving repository of problem-solving strategies, code snippets, and heuristics. The curator selectively retains high-value insights, refines existing strategies, and organizes memory efficiently. This ensures the memory (cheatsheet) remains concise, generalizable, and action-oriented, continuously improving test-time reasoning. 
(Once again, we note that during the initial phases of our experiments, we used \"cheatsheet\" and \"memory\" interchangeably to describe stored problem-solving content. However, to maintain consistency, we formally define \\( M_{i} \\) as memory throughout this paper. Since this was purely a semantic choice, we did not find it necessary to rerun our experiments to reflect this terminology shift.)" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.924, + 0.495, + 0.936 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.058, + 0.694, + 0.071 + ], + "angle": 0, + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + }, + { + "type": "image", + "bbox": [ + 0.235, + 0.086, + 0.747, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.458, + 0.751, + 0.473 + ], + "angle": 0, + "content": "Figure 15: The rest of the prompt used by the memory curator under DC-RS (Figure 14)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "22" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_origin.pdf b/data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..831971de6b34f520c3e41d8bd459d447e6b8405b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07d1ad9c32c44dde2745a5c1b4de16ed8d9fa9b5a50e4197f0466a9d14abd0db +size 3718300 diff --git a/data/2025/2504_07xxx/2504.07952/full.md b/data/2025/2504_07xxx/2504.07952/full.md new file mode 100644 index 0000000000000000000000000000000000000000..a54ec0985c0e0b1a7d8f5b787bc2863f4a7f767f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/full.md @@ -0,0 +1,672 @@ +# Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory + +Mirac Suzgun1 Mert Yuksekgonul1 Federico Bianchi2 Dan Jurafsky1 James Zou1,2 + +# Abstract + +Despite their impressive performance on complex tasks, current language models (LMs) typically operate in a vacuum: Each input query is processed separately, without retaining insights from previous attempts. Here, we present Dynamic Cheatsheet (DC), a lightweight framework that endows a black-box LM with a persistent, evolving memory. Rather than repeatedly re-discovering or re-committing the same solutions and mistakes, DC enables models to store and reuse accumulated strategies, code snippets, and general problem-solving insights at inference time. This test-time learning enhances performance substantially across a range of tasks without needing explicit ground-truth labels or human feedback. Leveraging DC, Claude 3.5 Sonnet's accuracy more than doubled on AIME math exams once it began retaining algebraic insights across questions. 
Similarly, GPT-4o's success rate on the Game of 24 puzzle increased from about $10\%$ to $99\%$ after the model discovered and reused a Python-based solution. In tasks prone to arithmetic mistakes, such as balancing equations, DC enabled GPT-4o and Claude to reach near-perfect accuracy by recalling previously validated code, whereas their baselines stagnated around $50\%$ . Beyond arithmetic challenges, DC yields notable accuracy gains on knowledge-demanding tasks. Claude achieved a $9\%$ improvement in GPQA-Diamond and an $8\%$ boost on MMLU-Pro Engineering and Physics problems. Crucially, DC's memory is self-curated, focusing on concise, transferable snippets rather than entire transcripts, thereby facilitating meta-learning and avoiding context ballooning. Unlike fine-tuning or static retrieval methods, DC adapts LMs' problem-solving skills on the fly, without modifying their underlying parameters, and offers a practical approach for continuously refining responses and cutting routine errors. Overall, our findings present DC as a promising approach for augmenting LMs with persistent memory, bridging the divide between isolated inference events and the cumulative, experience-driven learning characteristic of human cognition.* + +![](images/504ebbc6428ef94b18208a5e2289adf9074bf5b956cbf3a1b292575deb49ed18.jpg) +Figure 1: Comparison of different baselines and Dynamic Cheatsheet (DC) variants on challenging reasoning benchmarks, including AIME exams and GPQA-Diamond. Baseline represents a standard prompting approach with minimal guidance, while DC- $\varnothing$ (a stronger baseline) contains explicit structured instructions for problem solving, as well as for Python code generation and execution, but lacks a memory component. Our proposed DC-Cu and DC-RS variants incorporate an evolving, text-based memory to enhance inference-time learning. 
Results (accuracy, %) demonstrate substantial improvements, with Claude 3.5 Sonnet gaining $27\%$ on AIME 2024 and $30\%$ on AIME 2025 under DC-Cu. In Game of 24, GPT-4o leaps from $10\%$ (baseline) to $99\%$ under DC-RS, reflecting its ability to retain and apply Python-based solutions efficiently. Similarly, Claude 3.5 Sonnet's accuracy more than doubles in Math Equation Solver, reaching $98\%$ . Overall, these findings highlight the impact of test-time learning through controlled memory augmentation and efficient retrieval. + +![](images/ff3eddb0ac8c9c521f6b81bc1e872f6fcce3319dd81f63ccf9e8e7f3c7dce2e3.jpg) + +![](images/dd056ff42f9f749c24e9c33257f16e5ab66dfe66f9fd3b7c310d8c6d9476a377.jpg) + +# 1. Introduction + +Modern large language models (LLMs) can tackle complex reasoning tasks, answer various questions, and generate extensive texts. Yet they still suffer from one critical limitation: once deployed, these models are fixed prior to deployment and typically retain no explicit or implicit memory of past questions, successes, or mistakes during inference. They approach each new problem de novo, often re-deriving the same insights—and re-committing the same errors. In contrast, human cognition stands on a foundation of incremental learning, continuously internalizing new experiences and solutions into a persistent mental model. + +In this work, we present Dynamic Cheatsheet (DC), a simple and intuitive framework that endows black-box LLMs with a persistent, evolving memory at inference time. Rather than fine-tuning weights (for instance, through dynamic evaluation (Krause et al., 2019) or domain adaptation (Gururangan et al., 2020)) or retrieving facts from a massive static corpus (as in traditional retrieval-augmented generation systems (Guu et al., 2020; Zhang et al., 2024b)), DC dynamically curates a compact library of reusable strategies, solution sketches, and code snippets. 
Either before or after each query, DC enables the system to decide which lessons to store, what to discard, and how to refine existing entries—thus effectively "learning" from successes and failures. It is a flexible online-learning approach that enables a black-box LLM to improve itself without needing any explicit ground truth labels or human feedback. + +The overall workflow of DC is intuitive and compelling. In one version of DC (DC-Cu.), when presented with a new query, the LM first consults its external memory to see if any prior insights, strategies or relevant model solutions have been stored. It then proposes a solution by combining the retrieved insights with its own internal reasoning capabilities. Upon generating an answer, it then proceeds to a curation phase that updates the memory: If the approach seems to be correct, useful, or practical, DC codifies it in its memory for future use; if an error surfaces, DC may revise or prune faulty heuristics. This all happens without gradient-based parameter updates, so computational overhead remains modest, and compatibility with black-box APIs (e.g., GPT-4 or Claude) is fully preserved. See Figure 4. + +We tested DC across multiple challenging benchmarks and observed that it increases performance and reduces repetitive mistakes. On AIME 2024, Claude 3.5 Sonnet jumped from $23\%$ to $50\%$ accuracy, more than doubling its baseline score, by retaining algebraic and combinatorial insights. Likewise, it gained $30\%$ accuracy on AIME 2025. Notably, these improvements hold in knowledge-intensive tasks as well. On GPQA-Diamond, which tests specialized domain questions, DC lifted Claude by over $9\%$ . 
In MMLU-Pro Engineering and Physics, it provided up to an $8\%$ boost in + +![](images/6ec5d4b82fc2fec50289e6f56bacca63d933fcf0c48267f64fa133ea1a802676.jpg) +Figure 2: Overall task performance of Claude 3.5 Sonnet under the baseline prompting approach with minimal instructions (BL) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS). + +performance by allowing the model to maintain a " toolkit" of formulas and general problem-solving patterns. + +An even more striking and compelling example is the Game of 24, a puzzle that requires the solver to combine four digits into an arithmetic expression equaling 24. GPT-4o's baseline performance (10%) increased to 99% under DC. Early in the test sequence, the model discovered that an efficient Python brute-force solver eliminated all manual guesswork. Once this snippet was stored, GPT-4o simply retrieved it for subsequent queries, avoiding manual arithmetic entirely. We saw a similar pattern in Math Equation Balancer, where GPT-4o and Claude soared from 45-50% to 98-100% by "recalling" a straightforward code-based approach instead of manually fumbling with numeric manipulations. + +Nonetheless, DC is not a panacea. We found that smaller models, such as GPT-4o-mini, benefit from DC in limited amounts. These models generate too few correct solutions in these challenging tasks in the first place, leaving the memory populated with flawed or incomplete strategies. Worse, they struggle to refine stored content. DC can amplify the strengths of models that can already produce high-quality outputs, but not fix foundational gaps in reasoning. + +We also note that DC differs from naive "append the entire conversation history" in-context learning approaches. Under DC, memory is carefully curated, focusing on succinct, useful, and transferable knowledge over raw transcripts. This prevents ballooning context lengths (Liu et al., 2024a) and helps ensure that repeated retrieval remains tractable. 
Indeed, part of DC's contribution is in formalizing a mechanism for selective, evolving retention—storing just enough to solve the next set of tasks without drowning in an ever-growing text buffer. Cf. (Karpicke & Roediger III, 2008; Roediger & Butler, 2011; Karpicke & Blunt, 2011) + +![](images/3cf3983f23c859e88d5921b3f4d9b8fd44874ef98f78cea1bad717f13efae90a.jpg) +Figure 3: Algorithmic illustration of the Dynamic Cheatsheet (DC)-based approaches and other baseline methods. Here, Gen represents the solution generator model, Cur the memory curator, and Retr the retriever. While we use the same black-box LLMs for both generation and curation, we differentiate their roles via task-agnostic instructions (prompts). The retrieval mechanism ranks historical inputs based on cosine similarity with the current query, selecting the most relevant past examples along with their generated solutions. + +![](images/c0718f44a9608275d94210fa91cb6ad2b96b9bc7c811b2ceda617f99ef271065.jpg) + +![](images/5d4189c7af6f4ad1886d1376f3f1be4e220294b63d135fd02fa2b265f1888f25.jpg) + +# 2. Dynamic Cheatsheet (DC) Methodology + +DC, in its core, includes an external, non-parametric memory that evolves in tandem with the LLM's inference process. Rather than fine-tuning the underlying weights, DC tracks successes and failures of the model at test time, then selectively stores heuristics, strategies, or short textual artifacts that can guide the LLM in future instances. Notably, this approach respects the black-box nature of many commercial LLM APIs: no gradient-based updates are required, and the model's core parameters remain untouched. + +# 2.1. DC: Building Blocks and Iterative Loop + +The DC framework consists of two core modules: generation and curation. Both modules can easily operate on top of the same LM (prompted differently) or on separate LMs. + +# 2.1.1. 
Solution Generation with Memory + +Let's consider a sequence of inputs $(x_{1},x_{2},\ldots ,x_{n})$ , where each $x_{i}\sim \mathcal{D}_{\mathrm{test}}$ indicates a new query or problem posed to the model sampled from the same distribution $\mathcal{D}_{\mathrm{test}}$ (a typical setting in online learning). The distribution $\mathcal{D}_{\mathrm{test}}$ is unknown to us. At the $i$ -th step, the model is provided with both the new query $x_{i}$ and the current memory state $M_{i}$ which captures knowledge gleaned from previous successes and failures. We denote the solution generator by Gen: + +$$ +\tilde {y} _ {i} = \operatorname {G e n} \left(x _ {i}, M _ {i}\right) \tag {1} +$$ + +Here, $\tilde{y}_i$ is the candidate solution produced by the model. $M_{i}$ helps condition the model to reuse or adapt previously stored solutions, insights, techniques, or heuristics. + +# 2.1.2. Memory Curation Step + +After the generator produces its answer $\tilde{y}_i$ to $x_i$ , the curator, Cur, updates the current content of the memory: + +$$ +M _ {i + 1} = \operatorname {C u r} \left(M _ {i}, x _ {i}, \tilde {y} _ {i}\right) \tag {2} +$$ + +During memory curation, Cur mainly considers: (i) the usefulness and generalizability of the newly produced answer + +(i.e., if $\tilde{y}_i$ is correct or provides valuable and generalizable insights, it is distilled into a form suitable for later reference), (ii) refinement or removal of existing memory entries (i.e., if an existing memory entry was incorrect or superseded by a more efficient or versatile strategy, Cur may remove or update it), and (iii) clarity and compactness of the entire memory (i.e., memory entries are consolidated to retain succinct, high-impact references and heuristics). + +![](images/92f9a34273fa84fe1598a46a5bfd6c72fab0f2c94d9475ea668cfc3b4a151d44.jpg) +Figure 4: Illustration of Dynamic Cheatsheet (DC-Cu variant). 
+ +Cur does not have access to ground-truth labels; so, it has to assess the correctness and efficiency of the solutions by itself before updating the memory. In our experiments, we instruct a single model to perform this crucial step. Yet, in practice, Cur can be implemented as a series of steps that instruct multiple tools and models, through different prompts, to verify the validity and efficiency of the solution and to transform the raw solution text into even more generalizable, reliable, and efficient strategies, insights, and code snippets. + +We refer to this version of DC above as DC-Cu (short for DC-Cumulative). Under DC-Cu, the system first performs solution generation based on the current memory (Eqn. 1) and then updates the memory (Eqn. 2), by cumulatively expanding and refining the memory items thus far. Unlike DC-RS, which is discussed in the next part, DC-Cu, does not contain a retrieval component, however. + +# 2.2. DC with Retrieval & Synthesis (DC-RS) + +DC-Cu has two potential drawbacks. First, it updates the memory after processing an input query, rather than refining it before generating a response. This means the model lacks + +the opportunity to incorporate new insights from the current query while reasoning through its solution. Second, DC-Cu does not store or revisit past input-output pairs unless explicitly retained in memory. This omission prevents the model from directly retrieving and leveraging historical responses, which can be particularly valuable in benchmarks covering diverse topics or domains (e.g., GPQA-Diamond). + +To address these issues, DC-RS modifies the sequence of memory updates and introduces a retrieval mechanism, Retr, into the curation process. Retr allows the model to retrieve the most relevant past input-output pairs from its knowledge base. By refining the memory before responding and retrieving prior cases when needed, DC-RS enhances the model's adaptability and reasoning efficiency. 
DC-RS first retrieves$^{1}$ the top-$k$ most similar inputs, along with their model-generated outputs, from previously seen examples, which we denote by $R_{i}^{(k)}$ (or simply $R_{i}$).$^{2}$
A final baseline uses retrieval but no curation. Specifically, for each new query, it retrieves the most similar past interactions and directly pastes them, verbatim, into the prompt. DR can help the model see relevant input-output pairs but not directly codify any abstract or generalized solutions.7 + +Figure 3 (above) contains pseudocodes of all the primary methods and baselines considered in this paper. + +# 3. Experimental Setup + +# 3.1. Tasks and Datasets + +To rigorously evaluate DC's effectiveness, we focus on challenging tasks where contemporary state-of-the-art LLMs, such as GPT-4o and Claude 3.5, still face limitations. Rather than evaluating on benchmarks where performance is near saturation (e.g., BBH (Suzgun et al., 2023b), MGSM (Shi et al., 2023), GSM8K (Cobbe et al., 2021)), we prioritize tasks that demand multi-step reasoning, heuristic search, strategic adaptation, and cumulative learning—that is, tasks in which iterative memory refinement can yield tangible improvements over time. $^{8}$ + +Overall, the selected datasets include algorithmic, logical, and domain-specific reasoning tasks, each chosen to stress-test the model's ability to refine its reasoning over time. + +(a) AIME 2020-2025 Exam Questions: The American Invitational Mathematics Examination (AIME) is a prestigious high-school competition featuring complex problems across algebra, combinatorics, number theory, geometry, and probability. These questions require deep mathematical reasoning and multi-step problem-solving. We consider three subsets: AIME $2024^{9}$ (30 questions), AIME $2025^{10}$ (30 questions), and AIME $2020 - 2024^{11}$ (133 questions). + +(b) GPQA-Diamond (Rein et al., 2024): A high-quality, difficult subset of the Graduate-Level Google-Proof Q&A (GPQA) benchmark, GPQA-Diamond contains 198 expert-validated questions across natural sciences, including biology, chemistry, and physics. 
These questions were correctly answered by domain experts but often missed by non-experts, making them ideal for evaluating DC's ability to handle complex, multi-hop reasoning tasks. + +(c) Game of 24 (Yao et al., 2023; Suzgun & Kalai, 2024): A heuristic-driven arithmetic challenge where the objective is to form an expression that evaluates to 24 using four given numbers exactly once. For instance, if the input values were "7 7 8 11," one valid answer would be "8*(7+7-11)." This task emphasizes systematic search, strategic reasoning, and pattern recognition. We use the 100 examples from (Suzgun & Kalai, 2024) to assess DC's capacity for refining computational heuristics and strategy over manual attempts. + +(d) Math Equation Balancer: Focused on elementary arithmetic reasoning, this dataset requires the model to complete equations by inserting the appropriate operators to form valid expressions. The task emphasizes the sequential placement of operators, as illustrated by the example “1 ? 2 ? 3 = 6,” where the model must identify the correct operators to satisfy the equation (“1 + 2 + 3 = 6” or “1 * 2 * 3 = 6”). We compiled 250 arithmetic expressions for this task. + +(e) MMLU-Pro (Engineering and Physics) (Wang et al., 2024b): A professional-level subset of the MMLU benchmark focused on physics and engineering. All questions are presented in a multiple-choice form. The original dataset contains 1,299 physics and 969 engineering questions. We sampled 250 questions from each subset. + +# 3.2. Language Models + +We evaluate the efficacy of DC across a range of language models. Our selection includes both state-of-the-art LLMs such as GPT-4o and Claude 3.5 Sonnet and their smaller-scale counterparts (namely, GPT-4o-mini and Claude 3.5 Haiku), as well as models such as DeepSeek R1 that are designed specifically for reasoning-intensive tasks. + +# 3.3. 
Evaluation Protocol + +To ensure standardized and reliable evaluation, all models are instructed to format their final answers in a structured, machine-readable format. All model answers are expected to be wrapped in the following XML-style tags: + +```txt + (final answer) +``` + +This explicit format ensures accurate and consistent parsing, eliminating errors arising from extraneous text or ambiguous outputs. Once extracted, the final answers are evaluated + +using their corresponding task-specific accuracy metric. + +# 3.3.1. Accuracy Metrics + +Given the diversity of the tasks, we use different accuracy metrics tailored to the specific requirements of each dataset. + +Soft Match (SM) is a lenient metric that considers an answer correct if it matches the ground truth after ignoring minor formatting differences, such as punctuation or whitespace variations. We apply this metric to GPQA-Diamond, and MMLU Pro (Engineering and Physics), in which questions are presented in a multiple-choice format. + +Functionally Correct (FC) is an even more flexible metric that evaluates whether the model's output satisfies the task-specific constraints, even if the exact numeral presentation or formatting differs slightly from the reference solution. We apply this metric to the Game of 24, Math Equation Balancer, and AIME benchmarks. + +# 4. Main Results + +# 4.1. DC enables test-time learning and reduces repetitive errors + +One of the most compelling illustrations of DC's capabilities emerges from the Game of 24 task. As seen in Table 1, GPT-4o's baseline accuracy on this arithmetic puzzle was just $10\%$ . Under DC-RS, its performance increased to $99\%$ , illustrating DC's capacity for test-time learning and iterative refinement. Early in the task sequence, GPT-4o discovered a reliable, Python-based brute-force method to solve Game of 24 and later on recognized the repetitive structure of the problem. The model then encoded this approach into its memory. 
Once established, GPT-4o consistently retrieved and applied more or less the same Python solution for subsequent examples, leading to rapid and accurate results.
TasksClaude 3.5 SonnetGPT-4o
BLDC-∅DRDC-Cu.DC-RSBLDC-∅DRDC-Cu.DC-RS
AIME 202423.336.743.350.046.720.036.726.736.740.0
AIME 20256.723.323.336.730.06.710.010.016.720.0
AIME 2020–246.730.139.138.440.69.824.124.120.324.8
Game of 2412.010.011.014.014.010.019.06.093.099.0
GPQA Diamond59.660.163.661.168.757.157.155.158.157.1
Math Eqn. Balancer44.856.460.410097.850.088.010010099.2
MMLU Pro Eng.61.257.265.266.867.653.251.648.844.051.2
MMLU Pro Physics74.075.680.477.682.075.670.875.670.475.2
+ +Table 1: Performance comparison of Dynamic Cheatsheet (DC) variants for Claude 3.5 Sonnet and GPT-4o across multiple benchmarks. BL (Baseline): standard inference without memory; DC-∅ (Empty Memory): includes structured problem-solving and explicit tool-use instructions but no memory retention mechanism; DR (Dynamic Retrieval): uses retrieval but lacks curated memory updates; DC-Cu (Cumulative Memory): iteratively accumulates model solutions but lacks retrieval; and DC-RS (Retrieval & Synthesis): combines retrieval with memory refinement/synthesis. These results highlight substantial accuracy gains under DC: Claude 3.5 Sonnet's AIME 2024 accuracy jumps by $27\%$ under DC-Cu, and GPT-4o's Game of 24 accuracy leaps from $10\%$ to $99\%$ under DC-RS. + +# 4.2. DC provides substantial improvements across various challenging reasoning benchmarks + +Beyond Game of 24, DC yielded significant gains across a range of complex mathematical and algorithmic tasks. See Table 1. The results below illustrate how iterative solution reuse can helpful in complex reasoning problems. + +AIME Exam Problems. The AIME exams provided some of the most dramatic improvements under DC. For Claude 3.5 Sonnet, performance on AIME 2020-2024 surged from $6.7\%$ to $40.6\%$ under DC-RS. A similar upward trend appeared on AIME 2024 (23.3% to $50.0\%$ ) and AIME 2025 (6.7% to $36.7\%$ ) under DC-Cu. DC-Cu, where the model curates memory after processing the input and does not involve a retrieval stage, also proved potent in recent exam sets, achieving highest accuracy scores in AIME 2024 and 2025. GPT-4o also showed some noteworthy gains. Its AIME 2024 performance raised from $20.0\%$ to $40.0\%$ under DC-RS, while its AIME 2025 score climbed from $6.7\%$ to $20.0\%$ . These boosts suggest that structured test-time-produced memory can help tackle difficult math problems. + +GPQA-Diamond. 
On GPQA-Diamond, Claude 3.5 Sonnet improved from $59.6\%$ to $68.7\%$ under DC-RS, a robust $9.1\%$ gain purely from test-time adaptation. DR $(63.6\%)$ demonstrated that retrieval alone helps, but the further jump to $68.7\%$ highlights how memory curation and synthesis can yield additional benefits. By contrast, GPT-4o experienced only a slight increase from $57.1\%$ to $58.1\%$ with DC-RS; our quantitative analysis of the model's outputs and memory showed us that retrieval can, in some cases, introduce confusion, especially if suboptimal examples are recalled. This contrast between different models underscores how the success of retrieval-based adaptation partly depends on model-specific generation and curation capabilities. + +Math Equation Balancer. As Table 1 shows, the base- + +line performance for Claude 3.5 Sonnet (44.8%) rose to $98 - 100\%$ with DC-RS and DC-Cu, while GPT-4o similarly improved from $50.0\%$ to near-perfect accuracy (99-100%). As observed in Game of 24, the models quickly learned an algorithmic or Python-based balancing routine, stored it in external memory, and repeatedly retrieved it, achieving exceptional consistency once the core method was established. + +MMLU-Pro Tasks. For MMLU-Pro Eng. and Physics, Claude 3.5 Sonnet exhibited consistent gains, rising by up to $8.0\%$ in Physics (from $74\%$ to $82\%$ ). Our examination of the curated memory entries shows that Claude temporarily stored and retrieved compact "reference guides" on engineering and physics principles, which might have proved beneficial for thematically similar questions. GPT-4o, on the other hand, observed slight decreases from the baseline on these tasks, suggesting that domain complexity and baseline knowledge gaps may attenuate DC's benefits if curated memory is less reliable or consistent. + +# 4.3. 
Memory curation (DC) fosters generalization and provides gains over full-history-appending (FH) + +Whereas FH (full-history) simply appends every previous dialogue turn into the prompt, DC actively filters and synthesizes high-value content. As shown in Table 2, Sonnet under FH reached $26.7\%$ accuracy in 2024 questions, while DC-based methods hit $50.0\%$ . Similarly, GPT-4o managed a baseline of $20.0\%$ but fell to $6.7\%$ using FH, in direct contrast to $40.0\%$ with DC-RS. Excessive uncurated input-output pairs can not only overwhelm the model's context window, dilute crucial insights and hamper retrieval efficiency, but also significantly increase inference costs over time. On the other hand, DC's selective memory curation ensures that problem-solving tips or code snippets remain readily accessible without clutter, thus facilitating more robust and consistent improvements across consecutive queries. + +![](images/965700a9c72784f9f8a2105c6bbe3acde1bc883831ff2ac484f5b48008c4be46.jpg) +Figure 5: Excerpt from GPT-4o's external memory after processing 100 examples from Game of 24 under DC-RS. Early in the test sequence, the model discovered a Python-based brute-force solution, stored it, and subsequently retrieved it for subsequent puzzles. This shift to structured code reuse resulted in a dramatic performance increase from $10\%$ to $99\%$ accuracy, eliminating arithmetic errors and redundant problem-solving efforts. + +# 4.4. DC fosters efficient tool usage / code generation + +A successful behavior under DC is the LLMs' inclination toward code generation to handle computationally intensive tasks. GPT-4o's near-complete reliance on Python scripts for Game of 24 exemplifies this shift. Rather than performing manual arithmetic repeatedly, GPT-4o recognized that code-based brute force is more systematic. 
It generated, stored, and iteratively refined a Python function that tested permutations of numbers and operations, allowing it to solve each instance of Game of 24 with high accuracy. + +This inclination toward automation illustrates DC's potential to nurture efficient tool-usage: the capacity to recognize when external tools (e.g., Python, symbolic math engines, or dedicated solvers) are more robust than internally verbalized chain-of-thought calculations. While we restricted the scope of tool usage to Python interpreter in this study, future expansions could easily explore a broader suite of tools, potentially amplifying LLM performance in specialized domains such as computational biology or legal research. + +
TasksClaude 3.5 SonnetGPT-4o
BLFHDC-Cu.BLFHDC-RS
AIME 202423.326.750.020.013.340.0
AIME 20256.76.736.76.73.320.0
+ +Table 2: Performance breakdown of BL (default baseline), FH (full history), DC-Cu, and DC-RS approaches under AIME 2024 and 2025. FH stores all past queries and outputs, while DC-Cu and DC-RS selectively refine stored memory. Results indicate that targeted memory curation in DC-RS leads to greater accuracy gains compared to full history retention, supporting the need for structured, self-updating knowledge mechanisms. + +# 4.5. Model scale and capacity impact DC effectiveness + +Our current results indicate that the effectiveness of DC is strongly tied to the model's scale and underlying generative capacity. While Claude 3.5 Sonnet and GPT-4o showed notable gains across multiple tasks under DC, their smaller counterparts, Claude 3.5 Haiku and GPT-4o-mini, showed more limited and inconsistent gains. + +Table 3, for instance, shows that Claude 3.5 Haiku achieved moderate gains under DC, with its accuracy on AIME 2024 rising from $10.0\%$ (baseline) to $36.7\%$ under DC-Cu. But gains on AIME 2025 were weaker, reaching only $13.3\%$ under DC- $\varnothing$ and DC-Cu. Interestingly, GPQA-Diamond saw an improvement from $43.4\%$ to $49.0\%$ under DC-RS, + +# GENERAL META-REASONING STRATEGIES + + + + + +Systematic Problem Analysis Framework (Reference: Q1-Q20) + +For complex mathematical problems: + +1. State problem requirements clearly +2. List key observations and theorems applicable +3. Identify patterns and relationships +4. Break into manageable sub-problems +5. Verify against examples +6. Consider computational approach when analytical solution is complex +7. For grid problems, analyze movement patterns and symmetries +8. For combinatorial problems, use appropriate counting techniques +9. Implement verification code when possible +10. Consider edge cases and constraints +11. For grid coloring problems, consider row/column patterns + + + + +Example application: + +1. Requirements: list all given conditions +2. Observations: identify applicable theorems +3. 
Patterns: look for structural relationships +4. Sub-problems: break into steps +5. Verification: test against examples +6. Implementation: use Python for verification + + + + +Count: 20 + +Figure 6: Example of Claude 3.5 Sonnet's curated memory after processing 20 AIME 2024 questions under DC-Cu. The memory captures key solution strategies, enables the model to generalize across similar computational problems, and boosts its accuracy. + +![](images/929c40871840b64eea8a65bb6e6edf2caf541d593b184d1645fa4b7013b9c21a.jpg) +Figure 7: Cumulative performance progression under DC for GPQA-Diamond (left) and Game of 24 (right). In GPQA-Diamond, Claude 3.5 Sonnet steadily improves as it accumulates relevant knowledge snippets (the first few points are noisy because $y$ measures cumulative accuracy). Meanwhile, in Game of 24, GPT-4o rapidly transitions from trial-and-error arithmetic to near-perfect performance once it recognizes and stores a Python-based solution. These trends highlight DC's ability to enhance accuracy via iterative test-time learning. + +![](images/0410e0cc4f37998875e3a12d1df04dd9e6a45d2a2ddd8cacbd54e80f3efd76b0.jpg) + +suggesting that retrieval-based adaptation might still provide utility in smaller models. + +
TasksClaude 3.5 Haiku
BLDC-∅DC-Cu.DC-RS
AIME 202410.026.736.730.0
AIME 20250.013.313.310.0
GPQA-Diamond43.441.943.749.0
TasksGPT-4o-mini
BLDC-∅DC-Cu.DC-RS
AIME 202416.720.013.313.3
AIME 202510.013.313.316.7
GPQA-Diamond34.334.333.832.3
+ +Table 3: Performance of Claude 3.5 Haiku and GPT-4o-mini, the smaller counterparts of Claude 3.5 Sonnet and GPT-4o, across AIME (2024, 2025) and GPQA-Diamond. These smaller models struggle to fully leverage DC, suggesting that memory-based adaptation is most effective when the base LM has sufficient generative competence. Performance improvements are more muted, highlighting the dependency of DC on model-scale reasoning ability. + +That said, GPT-4o-mini (Table 3) showed even smaller gains, with some variants leading to slight declines in performance. On AIME 2024, DC- $\varnothing$ provided a $20.0\%$ boost, but both DC-Cu and DC-RS performed worse than baseline. AIME 2025 showed a minor improvement, peaking at $16.7\%$ under DC-RS. On GPQA-Diamond, GPT-4o-mini's performance, however, remained largely stagnant or slightly declined under memory-based adaptation, suggesting that it struggled to leverage stored information effectively. + +These imply two drawbacks of smaller models under DC: + +(a) Generative competence. For DC to be effective, the base model must produce correct solutions with sufficient frequency to populate the memory with high-quality, reusable strategies. Smaller models, such as GPT-4o-mini and Claude 3.5 Haiku, generate correct solutions less reliably, + +leading to a sparse or low-quality memory repository. As a result, iterative refinement stalls because the stored knowledge consists mostly of incorrect or partial attempts. + +(b) Contextual and memory curation limitations. Smaller models struggle with long-context understanding/generation and memory retrieval, leading to inefficient or irrelevant memory usage. Unlike their larger counterparts, which can more effectively retrieve and synthesize solutions from stored heuristics, smaller models often fail to retrieve the most relevant past solutions or misapply retrieved knowledge to new problems. 
This results in inconsistent performance under DC-RS, particularly in tasks requiring complex reasoning or strategic adaptation. + +# 4.6. Test-time task similarity and example ordering can amplify DC's overall impact + +Another central insight is that DC thrives when test examples share structural similarities. In both Game of 24 and Math Equation Balancer, once GPT-4o identified an efficient solution, it reused it consistently for subsequent tasks. Similarly, in AIME, discovering a geometry or combinatorics strategy allowed for easy transfer across questions of analogous structure. Consequently, tasks arranged to present related questions early may accelerate and improve the model's test-time learning. This suggests that curriculum-style learning (Bengio et al., 2009), where simpler or archetypal problems are presented first to build a repository of valid heuristics, may potentially bootstrap performance. Cf. (Lopez-Paz & Ranzato, 2017; Zelikman et al., 2022; Chen et al., 2024) + +# 5. Additional Analyses and Discussions + +Reasoning and information efficiency. One key insight is that DC reduces the need to "reinvent the wheel" for each query. By encoding and reusing well-established techniques + +(e.g., Python-based solving for Game of 24), models can bypass repeated rediscovery of the same strategies. This significantly cuts down reasoning overhead and token usage in subsequent queries, though the initial cost of discovering a robust approach and curating it remains non-trivial. + +DC performs better than majority voting (MV). To test if DC provides advantages over conventional MV at inference, we also tested Sonnet on AIME 2024 and 2025 using both approaches. MV, which selects the most common answer from three independent generations, yielded no improvements over single-shot inference. As seen in Table 4, on AIME 2024, MV performed identically to the baseline $(23.3\%)$ , while on AIME 2025, it remained at $6.7\%$ , offering no tangible gain. 
Even with DC- $\emptyset$ , MV slightly underperformed $(33.3\%$ vs. $36.7\%)$ . In contrast, DC-Cu outperformed MV, reaching $50.0\%$ on AIME 2024 and $36.7\%$ on AIME 2025. Unlike MV, which passively aggregates outputs, DC actively refines knowledge over time, eliminating errors and improving solution quality. This confirms that memory-based adaptation is far more effective than simple statistical voting in complex reasoning tasks. + +
TasksClaude 3.5 Sonnet
BLMV(BL)DC-∅MV(DC-∅)DC-Cu.
AIME 202423.323.336.733.350.0
AIME 20256.76.723.323.336.7
+ +Table 4: Comparison of majority voting (MV) with DC on AIME. + +Clustering of errors and corrections. Our experiments suggest that errors and their corrections often cluster in a latent embedding space. See Figure 10. Once a model acquires a high-quality heuristic for a cluster of related queries, it can apply this knowledge to tightly embedded neighbors. However, faulty heuristics that slip into memory can be equally amplified. Ensuring that the memory remains "clean" thus requires careful curation and, if necessary, pruning to avoid propagating erroneous strategies. + +Transferability of memory content across models. We also observed that larger models, such as Claude 3.5 Sonnet and GPT-4o, can sometimes produce higher-quality strategies that, in principle, could benefit smaller models if the memory is transferred. However, if a smaller model lacks the generative capacity to interpret or refine those strategies correctly, its performance can stall or degrade. In our ablation experiments, we observed mixed results. This indicates that memory entries, while helpful, cannot fully compensate for inadequate base capability. + +Long-context generation versus understanding. Most large LLMs excel at processing lengthy inputs but struggle to generate comparably long $^{12}$ and well-organized outputs. + +DC's memory curation after each query can demand precise reproduction or modification of prior knowledge. We observed instances where the model merely references or abbreviates the existing memory (e.g., "Previous content [...] preserved") instead of explicitly rewriting it. Such truncated memory updates can reduce the quality of stored heuristics over time. Potential solutions include maintaining a structured, external database that the LM can reference without regenerating large swaths of text each time. + +Retrieval bottlenecks and noise. 
While retrieval-based variants (e.g., DC-RS) can substantially improve accuracy, poorly filtered retrieval mechanisms can introduce confusion, particularly when presented with highly diverse or loosely related queries. For example, in our experiments, GPT-4o's performance occasionally dipped in GPQA-Diamond due to suboptimal retrieval choices. This underscores the importance of robust retrieval methods (e.g., dense vector search, advanced ranking algorithms) that can reliably surface higher quality exemplars or heuristics while suppressing irrelevant or contradictory texts. + +Hierarchical and modular memory. As LLM deployments scale, specialized domains may benefit from subdividing or hierarchically organizing memory. For instance, a system could maintain separate curated memories for topics like combinatorics or physics, each updated by a specialized retrieval or curation mechanism. This may reduce the load on a unified memory store and help isolate errors within their respective domains, with the goal of further improving the clarity and reliability of retrieved heuristics. + +Time and token complexity. Although DC requires memory curation after each query, it optimizes efficiency over time by reducing redundant computation and token usage.[13] As the model retrieves and refines solutions, memory maintenance becomes a net gain rather than a cost. However, its sequential structure still poses challenges for large-scale parallel or batch tasks requiring independent inference. + +Smaller or more specialized models and R1 experiments. Finally, we note that smaller models, such as GPT-4o-mini, show limited gains under DC, as seen in Table 3. Additional experiments with "R1" models such as DeepSeek R1 and o1 similarly showed minimal or inconsistent improvements. In these cases, these models' generative ability appears too restricted to produce reliable strategies for storage or to interpret retrieved heuristics effectively. The solutions were far too verbose and long. 
Without sufficiently accurate and efficient base solutions, memory curation cannot yield substantial gains. This limitation ties back to the core premise that effective DC demands a capable foundation model to seed and refine the curated knowledge. + +Overall, DC offers a useful and practical framework for continuous, test-time learning in LLMs. Our findings emphasize the synergy between model capacity and memory curation, the importance of structural task similarity and retrieval precision, and the benefits of offloading repeated computations to flexible external stores (e.g., Python scripts). At the same time, alternative mechanisms (e.g., specialized sub-memories or adaptive example ordering) and more sophisticated retrieval techniques (e.g., topological clustering) remain promising directions for further research. + +# Acknowledgments + +We thank Batu El, Sabri Eyuboglu, Tayfun Gur, Emily Shen, Jake Silberg, Elana Simon, and Kyle Swanson for their helpful comments and suggestions. We also thank the members of the James Zou Lab at Stanford for their feedback in the early stages of this project. Suzgun gratefully acknowledges the support of an HAI-SAP Fellowship. + +# References + +Amari, S.-I. Natural gradient works efficiently in learning. Neural computation, 10(2):251-276, 1998. +Arcuschin, I., Janiak, J., Krzyzanowski, R., Rajamanoharan, S., Nanda, N., and Conmy, A. Chain-of-thought reasoning in the wild is not always faithful. In Workshop on Reasoning and Planning for Large Language Models, 2025. URL https://openreview.net/forum?id=L8094Whth0. +Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate, and critique through self-reflection. In The Twelfth International Conference on Learning Representations, 2023. +Bengio, Y., Louradour, J., Collobert, R., and Weston, J. Curriculum learning. In Proceedings of the 26th annual international conference on machine learning, pp. 41-48, 2009. 
+Besta, M., Blach, N., Kubicek, A., Gerstenberger, R., Podstawski, M., Gianinazzi, L., Gajda, J., Lehmann, T., Niewiadomski, H., Nyczyk, P., et al. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 17682-17690, 2024. +Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., Van Den Driessche, G. B., Lespiau, J.-B., Damoc, B., Clark, A., et al. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, pp. 2206-2240. PMLR, 2022. + +Bottou, L. and Cun, Y. Large scale online learning. Advances in neural information processing systems, 16, 2003. +Bottou, L. and Le Cun, Y. On-line learning for very large data sets. Applied stochastic models in business and industry, 21(2):137-151, 2005. +Boudiaf, M., Mueller, R., Ben Ayed, I., and Bertinetto, L. Parameter-free online test-time adaptation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8344-8353, 2022. +Bulatov, A., Kuratov, Y., and Burtsev, M. Recurrent memory transformer. Advances in Neural Information Processing Systems, 35:11079-11091, 2022. +Chen, Z., Deng, Y., Yuan, H., Ji, K., and Gu, Q. Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024. +Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. +Feng, T., Han, P., Lin, G., Liu, G., and You, J. Thought-retriever: Don't just retrieve raw data, retrieve thoughts, 2024. URL https://openreview.net/forum?id=SkDNQbMQba. +Feng, Y., Li, F., Song, Z., Zheng, B., and Koehn, P. Learn to remember: Transformer with recurrent memory for document-level machine translation. arXiv preprint arXiv:2205.01546, 2022. 
+Golovneva, O., O'Brien, S., Pasunuru, R., Wang, T., Zettlemoyer, L., Fazel-Zarandi, M., and Celikyilmaz, A. Pathfinder: Guided search over multi-step reasoning paths. arXiv preprint arXiv:2312.05180, 2023. +Gou, Z., Shao, Z., Gong, Y., Shen, Y., Yang, Y., Duan, N., and Chen, W. Critic: Large language models can self-correct with tool-interactive critiquing. arXiv preprint arXiv:2305.11738, 2023. +Graves, A. Generating sequences with recurrent neural networks. arXiv preprint arXiv:1308.0850, 2013. +Graves, A., Wayne, G., and Danihelka, I. Neural Turing machines. arXiv preprint arXiv:1410.5401, 2014. +Gururangan, S., Marasovic, A., Swayamdipta, S., Lo, K., Beltagy, I., Downey, D., and Smith, N. A. Don't stop pretraining: Adapt language models to domains and tasks. arXiv preprint arXiv:2004.10964, 2020. + +Guu, K., Lee, K., Tung, Z., Pasupat, P., and Chang, M. Retrieval augmented language model pre-training. In International conference on machine learning, pp. 3929-3938. PMLR, 2020. +He, Z., Karlinsky, L., Kim, D., McAuley, J., Krotov, D., and Feris, R. Camelot: Towards large language models with training-free consolidated associative memory. arXiv preprint arXiv:2402.13449, 2024. +Joulin, A. and Mikolov, T. Inferring algorithmic patterns with stack-augmented recurrent nets. Advances in neural information processing systems, 28, 2015. +Karpicke, J. D. and Blunt, J. R. Retrieval practice produces more learning than elaborative studying with concept mapping. Science, 331(6018):772-775, 2011. +Karpicke, J. D. and Roediger III, H. L. The critical importance of retrieval for learning. science, 319(5865): 966-968, 2008. +Karpukhin, V., Oguz, B., Min, S., Lewis, P. S., Wu, L., Edunov, S., Chen, D., and Yih, W.-t. Dense passage retrieval for open-domain question answering. In EMNLP (1), pp. 6769-6781, 2020. +Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., and Lewis, M. Generalization through memorization: Nearest neighbor language models. 
In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=Hk1BjCEKvH. +Kojima, T., Gu, S. S., Reid, M., Matsuo, Y., and Iwasawa, Y. Large language models are zero-shot reasoners. Advances in neural information processing systems, 35: 22199-22213, 2022. +Krause, B., Kahembwe, E., Murray, I., and Renals, S. Dynamic evaluation of transformer language models. arXiv preprint arXiv:1904.08378, 2019. +Lazaridou, A., Gribovskaya, E., Stokowiec, W. J., and Grigorev, N. Internet-augmented language models through few-shot prompting for open-domain question answering, 2023. URL https://openreview.net/forum?id=hFCUPkSSRE. +Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., Kuttler, H., Lewis, M., Yih, W.-t., Rocktaschel, T., et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in neural information processing systems, 33:9459-9474, 2020. +Liu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost in the middle: How language models use long contexts. Transactions of the Association for Computational Linguistics, 12:157-173, 2024a. + +Liu, X., Dong, P., Hu, X., and Chu, X. Longgenbench: Long-context generation benchmark. arXiv preprint arXiv:2410.04199, 2024b. +Liu, Y., Kothari, P., Van Delft, B., Bellot-Gurlet, B., Mordan, T., and Alahi, A. Ttt++: When does self-supervised test-time training fail or thrive? Advances in Neural Information Processing Systems, 34:21808-21820, 2021. +Long, J. Large language model guided tree-of-thought. arXiv preprint arXiv:2305.08291, 2023. +Lopez-Paz, D. and Ranzato, M. Gradient episodic memory for continual learning. Advances in neural information processing systems, 30, 2017. +Lu, P., Peng, B., Cheng, H., Galley, M., Chang, K.-W., Wu, Y. N., Zhu, S.-C., and Gao, J. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36: 43447-43478, 2023. 
+Madaan, A., Tandon, N., Clark, P., and Yang, Y. Memory-assisted prompt editing to improve gpt-3 after deployment. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 2833–2861, 2022. +Madaan, A., Tandon, N., Gupta, P., Hallinan, S., Gao, L., Wegreffe, S., Alon, U., Dziri, N., Prabhumoye, S., Yang, Y., et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023. +McCloskey, M. and Cohen, N. J. Catastrophic interference in connectionist networks: The sequential learning problem. In Psychology of learning and motivation, volume 24, pp. 109-165. Elsevier, 1989. +Mikolov, T., Karafiát, M., Burget, L., Cernocký, J., and Khudanpur, S. Recurrent neural network based language model. In *Interspeech*, volume 2, pp. 1045–1048. Makuhari, 2010. +Munkhdalai, T., Sordoni, A., Wang, T., and Trischler, A. Metalearned neural memory. Advances in Neural Information Processing Systems, 32, 2019. +Niu, S., Wu, J., Zhang, Y., Chen, Y., Zheng, S., Zhao, P., and Tan, M. Efficient test-time model adaptation without forgetting. In International conference on machine learning, pp. 16888-16905. PMLR, 2022. +Qin, Y., Liang, S., Ye, Y., Zhu, K., Yan, L., Lu, Y., Lin, Y., Cong, X., Tang, X., Qian, B., et al. Toollm: Facilitating large language models to master 16000+ real-world apis. arXiv preprint arXiv:2307.16789, 2023. + +Rannen-Triki, A., Bornschein, J., Pascanu, R., Hutter, M., György, A., Galashov, A., Teh, Y. W., and Titsias, M. K. Revisiting dynamic evaluation: Online adaptation for large language models. arXiv preprint arXiv:2403.01518, 2024. +Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J., and Bowman, S. R. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=Ti67584b98. +Roediger, H. L. and Butler, A. C. 
The critical role of retrieval practice in long-term retention. Trends in cognitive sciences, 15(1):20-27, 2011. +Schick, T., Dwivedi-Yu, J., Dessi, R., Raileanu, R., Lomeli, M., Hambro, E., Zettlemoyer, L., Cancedda, N., and Scialom, T. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36:68539-68551, 2023. +Shen, Y., Song, K., Tan, X., Li, D., Lu, W., and Zhuang, Y. HuggingGPT: Solving AI tasks with chatGPT and its friends in hugging face. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=yHdTscY6Ci. +Shi, F., Fried, D., Ghazvininejad, M., Zettlemoyer, L., and Wang, S. I. Natural language to code translation with execution. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 3533-3546, 2022. +Shi, F., Suzgun, M., Freitag, M., Wang, X., Srivats, S., Vosoughi, S., Chung, H. W., Tay, Y., Ruder, S., Zhou, D., Das, D., and Wei, J. Language models are multilingual chain-of-thought reasoners. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=fR3wGck-IXp. +Shi, W., Min, S., Yasunaga, M., Seo, M., James, R., Lewis, M., Zettlemoyer, L., and Yih, W.-t. REPLUG: Retrieval-augmented black-box language models. In Duh, K., Gomez, H., and Bethard, S. (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 8371-8384, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.463. URL https://aclanthology.org/2024.naacl-long.463/. +Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K., and Yao, S. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36:8634-8652, 2023. 
+ +Sun, Y., Wang, X., Liu, Z., Miller, J., Efros, A., and Hardt, M. Test-time training with self-supervision for generalization under distribution shifts. In International conference on machine learning, pp. 9229-9248. PMLR, 2020. +Sun, Y., Li, X., Dalal, K., Xu, J., Vikram, A., Zhang, G., Dubois, Y., Chen, X., Wang, X., Koyejo, S., et al. Learning to (learn at test time): Rnns with expressive hidden states. arXiv preprint arXiv:2407.04620, 2024. +Surís, D., Menon, S., and Vondrick, C. Vipergpt: Visual inference via python execution for reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 11888-11898, 2023. +Suzgun, M. and Kalai, A. T. Meta-prompting: Enhancing language models with task-agnostic scaffolding. arXiv preprint arXiv:2401.12954, 2024. +Suzgun, M., Gehrmann, S., Belinkov, Y., and Shieber, S. M. Memory-augmented recurrent neural networks can learn generalized dyck languages. arXiv preprint arXiv:1911.03329, 2019. +Suzgun, M., Melas-Kyriazi, L., and Jurafsky, D. Follow the wisdom of the crowd: Effective text generation via minimum bayes risk decoding. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 4265-4293, 2023a. +Suzgun, M., Scales, N., Scharli, N., Gehrmann, S., Tay, Y., Chung, H. W., Chowdhery, A., Le, Q., Chi, E., Zhou, D., et al. Challenging big-bench tasks and whether chain-of-thought can solve them. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 13003-13051, 2023b. +Suzgun, M., Shieber, S. M., and Jurafsky, D. string2string: A modern python library for string-to-string algorithms. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pp. 278-285, 2024. +Syed, N. A., Liu, H., and Sung, K. K. Handling concept drifts in incremental learning with support vector machines. In Proceedings of the fifth ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 
317-321, 1999. +Thrun, S. and Mitchell, T. M. Lifelong robot learning. Robotics and autonomous systems, 15(1-2):25-46, 1995. +Vu, T., Iyyer, M., Wang, X., Constant, N., Wei, J., Wei, J., Tar, C., Sung, Y.-H., Zhou, D., Le, Q., et al. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214, 2023. + +Wang, D., Shelhamer, E., Liu, S., Olshausen, B., and Darrell, T. Tent: Fully test-time adaptation by entropy minimization. arXiv preprint arXiv:2006.10726, 2020. +Wang, X., Wei, J., Schuurmans, D., Le, Q. V., Chi, E. H., Narang, S., Chowdhery, A., and Zhou, D. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1PL1NIMMrw. +Wang, Y., Gao, Y., Chen, X., Jiang, H., Li, S., Yang, J., Yin, Q., Li, Z., Li, X., Yin, B., et al. Memoryllm: Towards self-updatable large language models. arXiv preprint arXiv:2402.04624, 2024a. +Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., Li, T., Ku, M., Wang, K., Zhuang, A., Fan, R., Yue, X., and Chen, W. MMLU-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024b. URL https://openreview.net/forum?id=y10DM6R2r3. +Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022. +Weston, J., Chopra, S., and Bordes, A. Memory networks. arXiv preprint arXiv:1410.3916, 2014. +Yang, L., Yu, Z., Zhang, T., Cao, S., Xu, M., Zhang, W., Gonzalez, J. E., and Cui, B. Buffer of thoughts: Thought-augmented reasoning with large language models. Advances in Neural Information Processing Systems, 37: 113519-113544, 2025. 
+Yao, S., Yu, D., Zhao, J., Shafran, I., Griffiths, T. L., Cao, Y., and Narasimhan, K. Tree of Thoughts: Deliberate problem solving with large language models, 2023. +Yuksekgonul, M., Bianchi, F., Boen, J., Liu, S., Lu, P., Huang, Z., Guestrin, C., and Zou, J. Optimizing generative ai by backpropagating language model feedback. Nature, 639:609-616, 2025. +Zelikman, E., Wu, Y., Mu, J., and Goodman, N. Star: Bootstrapping reasoning with reasoning. Advances in Neural Information Processing Systems, 35:15476-15488, 2022. +Zhang, K., Kang, Y., Zhao, F., and Liu, X. LLM-based medical assistant personalization with short- and long-term memory coordination. In Duh, K., Gomez, H., and Bethard, S. (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 2386-2398, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.132. URL https://aclanthology.org/2024.naacl-long.132/. +Zhang, M., Levine, S., and Finn, C. Memo: Test time robustness via adaptation and augmentation. Advances in neural information processing systems, 35:38629-38642, 2022. +Zhang, T., Patil, S. G., Jain, N., Shen, S., Zaharia, M., Stoica, I., and Gonzalez, J. E. RAFT: Adapting language model to domain specific RAG. In First Conference on Language Modeling, 2024b. URL https://openreview.net/forum?id=rzQGHXNReU. +Zhong, Z., Lei, T., and Chen, D. Training language models with memory augmentation. In Goldberg, Y., Kozareva, Z., and Zhang, Y. (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 5657-5673, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.382. URL https://aclanthology.org/2022.emnlp-main.382/. 
+Zhou, D., Scharli, N., Hou, L., Wei, J., Scales, N., Wang, X., Schuurmans, D., Cui, C., Bousquet, O., Le, Q., et al. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625, 2022. + +# A. Background & Related Work + +# A.1. Test-time learning (online learning) + +Test-time learning—also referred to as online or incremental learning (adaptation)—encompasses a family of methods in which a stochastic model updates its predictions by incorporating information seen during inference, without undergoing conventional, full-scale offline finetuning. Early versions of test-time adaptation focused on local or transductive learning, where a model re-fit or re-weighted its parameters with each new test instance or batch (McCloskey & Cohen, 1989; Thrun & Mitchell, 1995; Amari, 1998; Syed et al., 1999; Bottou & Cun, 2003; Bottou & Le Cun, 2005, inter alia). In computer vision, for example, methods like test-time training have been shown to mitigate domain shifts by optimizing a self-supervised loss on incoming data (Wang et al., 2020; Sun et al., 2020; Liu et al., 2021; Boudiaf et al., 2022; Niu et al., 2022; Zhang et al., 2022; Sun et al., 2024). In the context of natural-language generation, test-time adaptation has appeared under terms such as "dynamic evaluation" (Mikolov et al., 2010; Graves, 2013; Krause et al., 2019; Rannen-Triki et al., 2024), in which a language model is updated with gradient steps on the test-time data itself. + +However, directly updating language model weights at test time can be computationally expensive and requires the capacity to modify parameters. For large-scale, black-box APIs (e.g., GPT-3 or Claude), one often lacks the ability to perform parameter updates easily, thereby making such an approach difficult, if not completely infeasible (Shi et al., 2024). 
To address this, a growing body of work has explored parameter-free adaptation, whereby one structurally modifies immediate model inputs (e.g., prompting) or draws from external memory to "update" the model's effective reasoning. Our approach aligns with this direction by allowing an LM to iteratively record solutions, explanations, or heuristics in an external memory component over successive interactions, avoiding weight updates entirely. + +In the broader test-time learning literature, reflexive, compositional, and iterative refinement approaches like Reflexion (Shinn et al., 2023), Self-Refine (Madaan et al., 2023), (Self-)Critic (Gou et al., 2023), Chameleon (Lu et al., 2023), Meta-Prompting (Suzgun & Kalai, 2024), and Self-RAG (Asai et al., 2023) inter alia, use feedback loops or verification mechanisms to correct mistakes in solutions. TextGrad (Yuksekgonul et al., 2025) similarly draws on the notion of "textual gradients" as an alternative to parameter-based gradients and provides a pathway for improvement based on the content of mistakes. Our proposed DC framework differs by focusing explicitly on storing generalizable heuristics, solutions, or meta-level insights that can be repeatedly retrieved and applied across tasks, not just to correct a single solution. Furthermore, DC does not require a + +new training loop for each batch or scenario; instead, the memory itself is updated to reflect newly found solutions, errors, or strategies without touching the model weights. + +# A.2. 
Test-time compute and reasoning + +It is now widely known and accepted that contemporary LLMs such as GPT-4 can exhibit substantial improvements in reasoning and generation capability when additional compute is devoted to inference-time strategies (e.g., chain-of-thought prompting (Wei et al., 2022; Kojima et al., 2022; Zhou et al., 2022), tree-of-thought expansions (Yao et al., 2023; Long, 2023), minimum Bayes risk decoding (Suzgun et al., 2023a; Shi et al., 2022; Golovneva et al., 2023), majority-vote sampling (Wang et al., 2023)). Prompting methods such as Tree-of-Thought (Yao et al., 2023), Graph-of-Thought (Besta et al., 2024), and other non-linear compositional reasoning paradigms systematically enlarge the inference-time search space. They allow models to explore various reasoning paths and exploit consensus or iterative corrections to arrive at more accurate and reliable conclusions (Wei et al., 2022; Wang et al., 2023). + +However, these expansions come at the cost of increased computational overhead per test instance (Yao et al., 2023). They are, however, typically ephemeral: once a solution is generated, subsequent tasks or input samples do not generally benefit from the heavy compute spent earlier, unless the user manually engineers advanced prompt-sharing or in-context demonstration strategies. Cf. (Zelikman et al., 2022). Our work, on the other hand, aims to reduce repeated overhead across multiple test instances of a similar domain by building a memory that persists from one query to the next. This memory not only reduces repetitive mistakes, but also consolidates and codifies robust solution strategies—effectively amortizing or "sharing" the cost of initial reflection across future tasks.[14] + +Another related thread involves tool usage or code execution (Schick et al., 2023; Lu et al., 2023; Shen et al., 2023; Qin et al., 2023; Surís et al., 2023; Suzgun & Kalai, 2024). 
These studies have explored how LLMs can call external Python interpreters, symbolic solvers, or other specialized + +services and APIs to offload complex computations. Our empirical findings too illustrate that once an LLM under DC recognizes a systematic way (e.g., Python-based brute force algorithm) to handle a certain class of problems (like arithmetic puzzles), it can store that approach in memory and repeatedly retrieve it. Thus, DC not only invests extra compute in a single session but spreads that computational benefit across multiple interactions, effectively learning to use tools more consistently and reliably over time. + +# A.3. Memory-augmented generation and reasoning + +Augmenting language models with external memory has seen renewed interest in recent years (Munkhdalai et al., 2019; Guu et al., 2020; Khandelwal et al., 2020; Bulatov et al., 2022; Borgeaud et al., 2022; Zhong et al., 2022; Feng et al., 2022; He et al., 2024; Wang et al., 2024a)—see also (Graves et al., 2014; Weston et al., 2014; Joulin & Mikolov, 2015; Suzgun et al., 2019) for early studies. Modern retrieval-augmented LLM approaches generally consult an external corpus of documents (i.e., a knowledge base) to improve factuality and reduce hallucination (Lewis et al., 2020; Lazaridou et al., 2023; Vu et al., 2023; Zhang et al., 2024b), but the retrieval corpus is almost always fixed prior to inference and does not evolve over time. These methods have been especially effective for open-domain question answering (Lewis et al., 2020; Guu et al., 2020; Karpukhin et al., 2020), where the model's own parameters may not hold all relevant knowledge. In practice, retrieval augmentation typically involves selecting and concatenating top- $k$ passages from a knowledge-base—while useful for factual queries, the approach, however, does not inherently solve iterative improvement or learning from mistakes in the sense of building upon prior solutions at inference time. 
+ +Another line of research more closely aligns with our vision by storing not just reference knowledge but also the reasoning processes and solution strategies of language models. Several recent works have explored this direction. Thought-Retriever (Feng et al., 2024) logs the model's chain-of-thought from past queries and uses them for new, analogous queries. Buffer-of-Thoughts (BoT; Yang et al., 2025) takes a slightly different approach by distilling high-level "thought templates" from problem-solving processes, though it relies on predefined templates that seem to be tailored towards specific task types that were considered in their experiments. Madaan et al. (2022) have demonstrated that deployed models like GPT-3 can be improved through memory mechanisms that capture user feedback on errors, preventing similar mistakes in future interactions. Zhang et al. (2024a) have proposed a dual memory architecture combining long-term and short-term storage for medical applications, though their approach requires fine-tuning to incorporate new knowledge. + +While these works reveal the many strategies for harnessing memory or feedback, DC emphasizes selectively storing the most relevant insights and heuristics. DC aims to avoid naive accumulation of full raw transcripts and ephemeral chain-of-thought expansions that can lead to memory bloat. Moreover, unlike methods that assume the model can be retrained or finetuned to incorporate memory items, we remain fully external and training-free; this aligns with the "plug-and-play" usage principle, in which an off-the-shelf model is augmented by an external memory that it reads from and writes to, but does not require any gradient-based adaptation. + +# B. Additional Figures and Tables + +# B.1. 
Performance Comparison of Baseline and DC-RS Approaches + +![](images/3753abf54911b452cbec3a721e8f488ca140cb869fbf92935bdc43d355e6fecd.jpg) +Figure 8: Overall performance of Claude 3.5 Sonnet under the baseline prompting approach with minimal instructions (Baseline) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS). + +![](images/3bf7063caa0bbc6e0efa054f63a7152996963104001581e3d8f2f2908b61ebd9.jpg) +Figure 9: Overall performance of GPT-40 under the baseline prompting approach with minimal instructions (Baseline) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS). + +# B.2. Clustering of Errors and Corrections + +![](images/cbe8ce8c8a02f346ead1b1918f9cf394af0d8137154ff8de1bebe61ee04b97ea.jpg) +tSNE Visualization of the Question Embeddings in GPQA Diamond +Figure 10: t-SNE visualization of the embeddings of the raw questions in GPQA-Diamond. Note that correct and incorrect answers often cluster in latent embedding space. DC can help transfer learned strategies within these clusters, but without careful curation, erroneous heuristics may also spread, thus requiring careful memory refinement and verification of solution strategies. + +# B.3. Evolution of Memory Content under Dynamic Cheatsheet + +![](images/17cc3869468072f22e50c7c41c39565cf80065a240b2f2a75e0814101afcba71.jpg) +Figure 11: This figure illustrates how memory content of GPT-4o evolves over time in Game of 24, quantified using a longest-common-subsequence (LCS)-similarity metric (Suzgun et al., 2024) between consecutive states (measured at the word level). While both DC-Cu and DC-RS show high stability after the first few iterations, DC-Cu experiences slightly greater fluctuations in the second half of inference. + +# B.4. Solution Generator and Memory Curator Prompts + +# B.4.1. 
Prompt Used by the Generator Model in Baseline + +![](images/3b13b9e6e372b158774aa1a3b38e8955de1127886d5e63e67d9c3ef8f1a54762.jpg) +Figure 12: Prompt used in the baseline (BL) approach, where the model receives minimal instructions. The prompt simply asks the model to answer the given question without any structured guidance, additional reasoning steps, or tool-use encouragement. This setup represents a traditional one-off inference method, reflecting how LLMs typically operate by default. + +# B.4.2. Prompt Used by the Generator Model in DR, FH, and DC Approaches + +# GENERATOR (PROBLEM SOLVER) + +Instruction: You are an expert problem-solving assistant tasked with analyzing and solving various questions using + +a combination of your expertise and provided reference materials. Each task will include: +1. A specific question or problem to solve +2. A cheatsheet containing relevant strategies, patterns, and examples from similar problems + +# ##1.ANALYSIS&STRATEGY + +- Carefully analyze both the question and cheatsheet before starting +- Search for and identify any applicable patterns, strategies, or examples within the cheatsheet +- Create a structured approach to solving the problem at hand +- Review and document any limitations in the provided reference materials + +# ## 2. SOLUTION DEVELOPMENT + +- Present your solution using clear, logical steps that others can follow and review +- Explain your reasoning and methodology before presenting final conclusions +- Provide detailed explanations for each step of the process +- Check and verify all assumptions and intermediate calculations + +# ##3.PROGRAMMINGTASKS + +When coding is required: + +- Write clean, efficient Python code + +- Follow the strict code formatting and execution protocol (always use the Python code formatting block; + +furthermore, after the code block, always explicitly request execution by appending: "EXECUTE CODE!": + +``` +``python + +Your code here + +# EXECUTE CODE! 
+ +- All required imports and dependencies should be clearly declared at the top of your code +- Include clear inline comments to explain any complex programming logic +- Perform result validation after executing your code +- Apply optimization techniques from the cheatsheet when applicable +- The code should be completely self-contained without external file dependencies—it should be ready to be +executed right away +- Do not include any placeholders, system-specific paths, or hard-coded local paths +- Feel free to use standard and widely-used pip packages +- Opt for alternative methods if errors persist during execution +- Exclude local paths and engine-specific settings (e.g., avoid configurations like +chess.engineSimpleEngine.popen_uci("/usr/bin/stockfish") + +# ## 4. FINAL ANSWER FORMAT + +ALWAYS present your final answer in the following format: + +# FINAL ANSWER: + + + +(final answer) + + + +N.B. Make sure that the final answer is properly wrapped inside the block. + +* For multiple-choice questions: Only provide the letter choice (e.g., (A)) +* For numerical answers: Only provide the final number (e.g., 42) +* For other types of answers, including free-response answers: Provide the complete final answer + +Example: + +Q: What is the meaning of life? + +A: [..] + +# FINAL ANSWER: + + + +42 + + + +# CHEATSHEET: + +# [CHEATSHEET] + +" + +Now it is time to solve the following question. + +CURRENTINPUT: + +# [QUESTION] + +Figure 13: Generator prompt used in the DR, FH, and DC approaches, where the model receives structured high-level instructions on solution development, strategy selection, and tool usage. This prompt explicitly encourages Python code generation and execution for computational tasks. Notably, this same structured prompt is used in all non-BL methods, including DC-Ø, DR, FH, DC-Cu, and DC-RS. 
We also remark that during the initial phases of our experiments, we used "cheatsheet" and "memory" interchangeably to describe stored problem-solving content. However, to maintain consistency, we formally define $M_{i}$ as memory throughout this paper. Since this was purely a semantic choice, we did not find it necessary to rerun our experiments to reflect this terminology shift. + +# B.4.3. Prompt Used by the Memory Curation Model under DC-RS + +# CHEATSHEET CURATOR + +# Purpose and Goals + +You are responsible for maintaining, refining, and optimizing the Dynamic Cheatsheet, which serves as a compact yet evolving repository of problem-solving strategies, reusable code snippets, and meta-reasoning techniques. Your goal is to enhance the model's long-term performance by continuously updating the cheatsheet with high-value insights while filtering out redundant or trivial information. + +- The cheatsheet should include quick, accurate, reliable, and practical solutions to a range of technical and creative challenges. + +- After seeing each input, you should improve the content of the cheatsheet, synthesizing lessons, insights, tricks, and errors learned from past problems and adapting to new challenges. + +# Core Responsibilities + +# Selective Knowledge Retention: + +- Preserve only high-value strategies, code blocks, insights, and reusable patterns that significantly contribute to problem-solving. +- Discard redundant, trivial, or highly problem-specific details that do not generalize well. +- Ensure that previously effective solutions remain accessible while incorporating new, superior methods. + +# Continuous Refinement & Optimization: + +- Improve existing strategies by incorporating more efficient, elegant, or generalizable techniques. +- Remove duplicate entries or rephrase unclear explanations for better readability. +- Introduce new meta-strategies based on recent problem-solving experiences. 
+ +# Structure & Organization: + +- Maintain a well-organized cheatsheet with clearly defined sections: +- Reusable Code Snippets and Solution Strategies +- General Problem-Solving Heuristics +- Optimization Techniques & Edge Cases +-Specialized Knowledge & Theorems + +- Use tagging (e.g., Q14, Q22) to reference previous problems that contributed to a given strategy. + +# Principles and Best Practices + +For every new problem encountered: +1. Evaluate the Solution's Effectiveness +- Was the applied strategy optimal? +- Could the solution be improved, generalized, or made more efficient? +- Does the cheatsheet already contain a similar strategy, or should a new one be added? + +# 2. Curate & Document the Most Valuable Insights + +- Extract key algorithms, heuristics, and reusable code snippets that would help solve similar problems in the future. +- Identify patterns, edge cases, and problem-specific insights worth retaining. +- If a better approach than a previously recorded one is found, replace the old version. + +# 3. Maintain Concise, Actionable Entries + +- Keep explanations clear, actionable, concise, and to the point. +- Include only the most effective and widely applicable methods. +- Seek to extract useful and general solution strategies and/or Python code snippets. + +# 4. Implement a Usage Counter + +Each entry must include a usage count: Increase the count every time a strategy is successfully used in problem-solving. +- Use the count to prioritize frequently used solutions over rarely applied ones. + +# Memory Update Format + +Use the following structure for each memory item: + +·· + + + + + +[Briefly describe the problem context, purpose, and key aspects of the solution.] (Reference: Q1, Q2, Q6, etc.) + + + + + +[Provide a well-documented code snippet, worked-out solution, or efficient strategy.] + + + + + +** Count: [Number of times this strategy has been used to solve a problem.] + + + +[...] + + + +** Count: [...] + +[...] + + + +[...] 
+ + + +- Prioritize accuracy, efficiency & generalizability: The cheatsheet should capture insights that apply across multiple problems rather than just storing isolated solutions. + +- Ensure clarity & usability: Every update should make the cheatsheet more structured, actionable, and easy to navigate. + +- Maintain a balance: While adding new strategies, ensure that old but effective techniques are not lost. + +- Keep it evolving: The cheatsheet should be a living document that continuously improves over time, enhancing test-time meta-learning capabilities. + +N.B. Keep in mind that once the cheatsheet is updated, any previous content not directly included will be lost and cannot be retrieved. + +Therefore, make sure to explicitly copy any (or all) relevant information from the previous cheatsheet to the new cheatsheet! Furthermore, + +make sure that all information related to the cheatsheet is wrapped inside the block. + +Figure 14: Prompt used for the memory curator under DC-RS, which is responsible for maintaining an evolving repository of problem-solving strategies, code snippets, and heuristics. The curator selectively retains high-value insights, refines existing strategies, and organizes memory efficiently. This ensures the memory (cheatsheet) remains concise, generalizable, and action-oriented, continuously improving test-time reasoning. (Once again, we note that during the initial phases of our experiments, we used "cheatsheet" and "memory" interchangeably to describe stored problem-solving content. However, to maintain consistency, we formally define $M_{i}$ as memory throughout this paper. Since this was purely a semantic choice, we did not find it necessary to rerun our experiments to reflect this terminology shift.) + +![](images/c18c5a4c320e8bf795d22cf17be40b64fa6fba5af8154a28995e9bc5885a3668.jpg) +Figure 15: The rest of the prompt used by the memory curator under DC-RS (Figure 14). 
\ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07952/images/0410e0cc4f37998875e3a12d1df04dd9e6a45d2a2ddd8cacbd54e80f3efd76b0.jpg b/data/2025/2504_07xxx/2504.07952/images/0410e0cc4f37998875e3a12d1df04dd9e6a45d2a2ddd8cacbd54e80f3efd76b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e326fb03c292319ec06e327f8b0e29a1ee18e93d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/0410e0cc4f37998875e3a12d1df04dd9e6a45d2a2ddd8cacbd54e80f3efd76b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa6d0c52e8620ff2dd358c46f56e5ff7b22bd87b17fd15778ce715cb39db619c +size 25373 diff --git a/data/2025/2504_07xxx/2504.07952/images/17cc3869468072f22e50c7c41c39565cf80065a240b2f2a75e0814101afcba71.jpg b/data/2025/2504_07xxx/2504.07952/images/17cc3869468072f22e50c7c41c39565cf80065a240b2f2a75e0814101afcba71.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3ec2745bbe57bc5be4de50d0a2c7998330144b2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/17cc3869468072f22e50c7c41c39565cf80065a240b2f2a75e0814101afcba71.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99b3a4cda29b682fb6073645ea1b6f1ed615f5b4d590e341a72e093dc9a74eb7 +size 62891 diff --git a/data/2025/2504_07xxx/2504.07952/images/206eeb3ff21667424a8371866b07de21ec4ae33ce5701128c9f405813680a1b5.jpg b/data/2025/2504_07xxx/2504.07952/images/206eeb3ff21667424a8371866b07de21ec4ae33ce5701128c9f405813680a1b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e30b4e2c37e5c0ce8a327d605032da7792ecdda9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/206eeb3ff21667424a8371866b07de21ec4ae33ce5701128c9f405813680a1b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bbbdbfc02e3dcf9fea6f57cc76b68380cad2d903b96a27dfc051b4032f01cc3 +size 2959 diff --git 
a/data/2025/2504_07xxx/2504.07952/images/2e1042422c326caf6fa103143f04c7b7cd213d5d0d13eda4d651f56c3def875c.jpg b/data/2025/2504_07xxx/2504.07952/images/2e1042422c326caf6fa103143f04c7b7cd213d5d0d13eda4d651f56c3def875c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c1613b1bf24b364d71ceb7468eac618d93ede68 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/2e1042422c326caf6fa103143f04c7b7cd213d5d0d13eda4d651f56c3def875c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1dda898afa70137dbbfed47bad4e4bb25c71c5b5a18101e57a1584f96ac0b069 +size 4687 diff --git a/data/2025/2504_07xxx/2504.07952/images/3753abf54911b452cbec3a721e8f488ca140cb869fbf92935bdc43d355e6fecd.jpg b/data/2025/2504_07xxx/2504.07952/images/3753abf54911b452cbec3a721e8f488ca140cb869fbf92935bdc43d355e6fecd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7552f6b9d54cb58830ef768cbaf9948f5a636ab0 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/3753abf54911b452cbec3a721e8f488ca140cb869fbf92935bdc43d355e6fecd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45e626e0ea25910c77ca1540f528161a2cd4b5a2e7f7558074c1b7dce336cbe5 +size 35147 diff --git a/data/2025/2504_07xxx/2504.07952/images/3b13b9e6e372b158774aa1a3b38e8955de1127886d5e63e67d9c3ef8f1a54762.jpg b/data/2025/2504_07xxx/2504.07952/images/3b13b9e6e372b158774aa1a3b38e8955de1127886d5e63e67d9c3ef8f1a54762.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66570d9e3f74de42237005004db92e10a635db91 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/3b13b9e6e372b158774aa1a3b38e8955de1127886d5e63e67d9c3ef8f1a54762.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6866dfe5b6eb011a59cdb14ff0809230e4059d09a159ebdaddf0d7f621b5a6a1 +size 78849 diff --git a/data/2025/2504_07xxx/2504.07952/images/3bf7063caa0bbc6e0efa054f63a7152996963104001581e3d8f2f2908b61ebd9.jpg 
b/data/2025/2504_07xxx/2504.07952/images/3bf7063caa0bbc6e0efa054f63a7152996963104001581e3d8f2f2908b61ebd9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d34f733d1bad643cbf98817e6e8526ba619bc46 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/3bf7063caa0bbc6e0efa054f63a7152996963104001581e3d8f2f2908b61ebd9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6049a7d725fe5eea1dfba48f117718c1af8a9d6d7b452dd7626635eeb50a5299 +size 35344 diff --git a/data/2025/2504_07xxx/2504.07952/images/3cf3983f23c859e88d5921b3f4d9b8fd44874ef98f78cea1bad717f13efae90a.jpg b/data/2025/2504_07xxx/2504.07952/images/3cf3983f23c859e88d5921b3f4d9b8fd44874ef98f78cea1bad717f13efae90a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05164aac9f67b33f81c788cd5dec78624a0fd38f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/3cf3983f23c859e88d5921b3f4d9b8fd44874ef98f78cea1bad717f13efae90a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84189c22386e9e1020d0739902bc6074cece04eb6baafa160a80a60b07a85ec7 +size 23423 diff --git a/data/2025/2504_07xxx/2504.07952/images/3ed04592319cec982ee33286db9f8cbbd38154a630abd830e4b00384da4d9283.jpg b/data/2025/2504_07xxx/2504.07952/images/3ed04592319cec982ee33286db9f8cbbd38154a630abd830e4b00384da4d9283.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f98286b79daffca4f0052a05ef6b0fabe3f38fba --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/3ed04592319cec982ee33286db9f8cbbd38154a630abd830e4b00384da4d9283.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8a5ab6d6bcb09633cc9a486cecd989a9f5f234d3d1821f3041ba24444dfb02e +size 3180 diff --git a/data/2025/2504_07xxx/2504.07952/images/4c761fb056c52ac2dda0586b9ecf8f17216ffcf9839d5398281625731563d48e.jpg b/data/2025/2504_07xxx/2504.07952/images/4c761fb056c52ac2dda0586b9ecf8f17216ffcf9839d5398281625731563d48e.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..44df57e2696ac3edae57db06a2a30e454c1799bc --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/4c761fb056c52ac2dda0586b9ecf8f17216ffcf9839d5398281625731563d48e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55f7938024a51046ba6d5efba92356ead7ff82859e000ea486ecd8b2592f2eb0 +size 20299 diff --git a/data/2025/2504_07xxx/2504.07952/images/504ebbc6428ef94b18208a5e2289adf9074bf5b956cbf3a1b292575deb49ed18.jpg b/data/2025/2504_07xxx/2504.07952/images/504ebbc6428ef94b18208a5e2289adf9074bf5b956cbf3a1b292575deb49ed18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b70b848f40ad77f626e6a45bc51519c65bc03a9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/504ebbc6428ef94b18208a5e2289adf9074bf5b956cbf3a1b292575deb49ed18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6883dba4f7568f93ea5944c1563765a496b85d73d6ed65a783c6242792546a70 +size 32023 diff --git a/data/2025/2504_07xxx/2504.07952/images/5d4189c7af6f4ad1886d1376f3f1be4e220294b63d135fd02fa2b265f1888f25.jpg b/data/2025/2504_07xxx/2504.07952/images/5d4189c7af6f4ad1886d1376f3f1be4e220294b63d135fd02fa2b265f1888f25.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2986da620e6dc2904762bcf2ba1635dc100176fb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/5d4189c7af6f4ad1886d1376f3f1be4e220294b63d135fd02fa2b265f1888f25.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2eac57ce9cc9f718320d7769fa93960d795c0297e42f148f511f41754be81c41 +size 23995 diff --git a/data/2025/2504_07xxx/2504.07952/images/6ec5d4b82fc2fec50289e6f56bacca63d933fcf0c48267f64fa133ea1a802676.jpg b/data/2025/2504_07xxx/2504.07952/images/6ec5d4b82fc2fec50289e6f56bacca63d933fcf0c48267f64fa133ea1a802676.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7544f02d42cb2ea8ce92304a92269c01820d31c9 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07952/images/6ec5d4b82fc2fec50289e6f56bacca63d933fcf0c48267f64fa133ea1a802676.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64cd68551401ad6e2c4a2b81b84eb168110c840be90704fd464a1cae58e459a6 +size 28299 diff --git a/data/2025/2504_07xxx/2504.07952/images/929c40871840b64eea8a65bb6e6edf2caf541d593b184d1645fa4b7013b9c21a.jpg b/data/2025/2504_07xxx/2504.07952/images/929c40871840b64eea8a65bb6e6edf2caf541d593b184d1645fa4b7013b9c21a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..075e5561beb5e3081b6e84ce1a6f47f7398affd8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/929c40871840b64eea8a65bb6e6edf2caf541d593b184d1645fa4b7013b9c21a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:869903fd8b899f35de92235e475e72f226796c01650fb31b170dc58e8fadc99b +size 24965 diff --git a/data/2025/2504_07xxx/2504.07952/images/92f9a34273fa84fe1598a46a5bfd6c72fab0f2c94d9475ea668cfc3b4a151d44.jpg b/data/2025/2504_07xxx/2504.07952/images/92f9a34273fa84fe1598a46a5bfd6c72fab0f2c94d9475ea668cfc3b4a151d44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5d0f33007c90f301367ffce2cae8e4084ce91d1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/92f9a34273fa84fe1598a46a5bfd6c72fab0f2c94d9475ea668cfc3b4a151d44.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94ec6ba4e642c3d39d99fe6a5ec3876a7e6f040632551b33565ed46ba08c10e9 +size 33956 diff --git a/data/2025/2504_07xxx/2504.07952/images/965700a9c72784f9f8a2105c6bbe3acde1bc883831ff2ac484f5b48008c4be46.jpg b/data/2025/2504_07xxx/2504.07952/images/965700a9c72784f9f8a2105c6bbe3acde1bc883831ff2ac484f5b48008c4be46.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a370238715bef6e3430e1a2034352da46a75da9b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/965700a9c72784f9f8a2105c6bbe3acde1bc883831ff2ac484f5b48008c4be46.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:fcfb7b90a566581e6d481417d7c3f5e138513ddd2d2caf25fb787a604f5a771f +size 78364 diff --git a/data/2025/2504_07xxx/2504.07952/images/9d6affcf252b705e810fdd65349c4205d86943abc5b929094e9861ab0e1c47f1.jpg b/data/2025/2504_07xxx/2504.07952/images/9d6affcf252b705e810fdd65349c4205d86943abc5b929094e9861ab0e1c47f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e627bcfb7f5ff512f12c5543f7d7e63a6e55dfc --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/9d6affcf252b705e810fdd65349c4205d86943abc5b929094e9861ab0e1c47f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8b5cbb58c67e6ea22264a0fd60667dd89838c90173d38491493734527d76172 +size 43721 diff --git a/data/2025/2504_07xxx/2504.07952/images/a96fc554773f447b0b92412be8f2e3f8819c76f4e33c639b283090006003112c.jpg b/data/2025/2504_07xxx/2504.07952/images/a96fc554773f447b0b92412be8f2e3f8819c76f4e33c639b283090006003112c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..067ff845f003e42b5c5bf9e4e836a644269c56dd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/a96fc554773f447b0b92412be8f2e3f8819c76f4e33c639b283090006003112c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9eafdff7cea2d3ac0c153f902fbd420afd2c60929c41dcd6f5d9a3d18cb1b5ac +size 20459 diff --git a/data/2025/2504_07xxx/2504.07952/images/b2dd0d0d80362dd89fd243809f6becce8148dccb76a5cfdb8c154a007b7f43d8.jpg b/data/2025/2504_07xxx/2504.07952/images/b2dd0d0d80362dd89fd243809f6becce8148dccb76a5cfdb8c154a007b7f43d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9711113e9328c04707c3c38c1c8c6b23e7c05271 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/b2dd0d0d80362dd89fd243809f6becce8148dccb76a5cfdb8c154a007b7f43d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e28c4853a2bd82838e506bb087a5ace74d4424fb65ad6d7f5ff02adbc8c51e36 +size 85347 diff --git 
a/data/2025/2504_07xxx/2504.07952/images/c0718f44a9608275d94210fa91cb6ad2b96b9bc7c811b2ceda617f99ef271065.jpg b/data/2025/2504_07xxx/2504.07952/images/c0718f44a9608275d94210fa91cb6ad2b96b9bc7c811b2ceda617f99ef271065.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b06c82b6349c374a461b92f279eda50e161ca317 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/c0718f44a9608275d94210fa91cb6ad2b96b9bc7c811b2ceda617f99ef271065.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce9f9f27121eba5706ef71c2d77c508b79a06fc7b76bc56a392a855746a5d44e +size 20528 diff --git a/data/2025/2504_07xxx/2504.07952/images/c18c5a4c320e8bf795d22cf17be40b64fa6fba5af8154a28995e9bc5885a3668.jpg b/data/2025/2504_07xxx/2504.07952/images/c18c5a4c320e8bf795d22cf17be40b64fa6fba5af8154a28995e9bc5885a3668.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b477b8cdfa09a9fad2f693e5fc6987ca4afd23f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/c18c5a4c320e8bf795d22cf17be40b64fa6fba5af8154a28995e9bc5885a3668.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a26ecbbcdf05ca15c86be7fac120c218065bf4b3c951307cdb3ee8077693aa8 +size 52014 diff --git a/data/2025/2504_07xxx/2504.07952/images/c67680ddd141bf7a0b7bf1fd84af90daa5dd07c8b31a9a66945d58d4ac2b9f56.jpg b/data/2025/2504_07xxx/2504.07952/images/c67680ddd141bf7a0b7bf1fd84af90daa5dd07c8b31a9a66945d58d4ac2b9f56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..654327bfc827274fb460169a38eb53d8c4d5abee --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/c67680ddd141bf7a0b7bf1fd84af90daa5dd07c8b31a9a66945d58d4ac2b9f56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccf3d55e997044942caf7fcd2e001228132f4d6e3ea684f89ffd89f948e5a7cd +size 3767 diff --git a/data/2025/2504_07xxx/2504.07952/images/cbe8ce8c8a02f346ead1b1918f9cf394af0d8137154ff8de1bebe61ee04b97ea.jpg 
b/data/2025/2504_07xxx/2504.07952/images/cbe8ce8c8a02f346ead1b1918f9cf394af0d8137154ff8de1bebe61ee04b97ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68de9f1830f65c19188969fce4c9e929a93315f9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/cbe8ce8c8a02f346ead1b1918f9cf394af0d8137154ff8de1bebe61ee04b97ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e536bb9634cd967ab82b9a4350426d1c8d5f26ca99ba4d1889fd8beea3b1732 +size 64288 diff --git a/data/2025/2504_07xxx/2504.07952/images/dd056ff42f9f749c24e9c33257f16e5ab66dfe66f9fd3b7c310d8c6d9476a377.jpg b/data/2025/2504_07xxx/2504.07952/images/dd056ff42f9f749c24e9c33257f16e5ab66dfe66f9fd3b7c310d8c6d9476a377.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ceab3241759d0ceec54b950105feb5e22cbb7b5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/dd056ff42f9f749c24e9c33257f16e5ab66dfe66f9fd3b7c310d8c6d9476a377.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13db0c04b5b79caf2c59b20f73c6e7e982a6ef60477bd8f362243d0892d22003 +size 21650 diff --git a/data/2025/2504_07xxx/2504.07952/images/e98633cbd3c95e00f68830058420b187b89948e75e360e2901ac5fd7b75024d3.jpg b/data/2025/2504_07xxx/2504.07952/images/e98633cbd3c95e00f68830058420b187b89948e75e360e2901ac5fd7b75024d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a25a62674e6d66b9c7688371b5535746987e2380 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/e98633cbd3c95e00f68830058420b187b89948e75e360e2901ac5fd7b75024d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23cd3e7b80111e1ce2abe7ca4587977c06d9a25a33925e2bb7a68c843cc7dff2 +size 3694 diff --git a/data/2025/2504_07xxx/2504.07952/images/ff3eddb0ac8c9c521f6b81bc1e872f6fcce3319dd81f63ccf9e8e7f3c7dce2e3.jpg b/data/2025/2504_07xxx/2504.07952/images/ff3eddb0ac8c9c521f6b81bc1e872f6fcce3319dd81f63ccf9e8e7f3c7dce2e3.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..492378723e9122ec7958446c2c5121942331aae6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/images/ff3eddb0ac8c9c521f6b81bc1e872f6fcce3319dd81f63ccf9e8e7f3c7dce2e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e1c74e4eb719ebfb4f8540097ab83518054618696eae6c2b0faacd908d67ab5 +size 15131 diff --git a/data/2025/2504_07xxx/2504.07952/layout.json b/data/2025/2504_07xxx/2504.07952/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..79c83985b0946ae7d9b48b2d98825699128283fd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07952/layout.json @@ -0,0 +1,18216 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 94, + 98, + 500, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 98, + 500, + 114 + ], + "spans": [ + { + "bbox": [ + 94, + 98, + 500, + 114 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 108, + 149, + 485, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 149, + 485, + 163 + ], + "spans": [ + { + "bbox": [ + 108, + 149, + 485, + 163 + ], + "type": "text", + "content": "Mirac Suzgun1 Mert Yuksekgonul1 Federico Bianchi2 Dan Jurafsky1 James Zou1,2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 273, + 184, + 321, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 273, + 184, + 321, + 196 + ], + "spans": [ + { + "bbox": [ + 273, + 184, + 321, + 196 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "spans": [ + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "text", + "content": "Despite their impressive performance on complex tasks, current language models (LMs) typically operate in a vacuum: 
Each input query is processed separately, without retaining insights from previous attempts. Here, we present Dynamic Cheatsheet (DC), a lightweight framework that endows a black-box LM with a persistent, evolving memory. Rather than repeatedly re-discovering or re-committing the same solutions and mistakes, DC enables models to store and reuse accumulated strategies, code snippets, and general problem-solving insights at inference time. This test-time learning enhances performance substantially across a range of tasks without needing explicit ground-truth labels or human feedback. Leveraging DC, Claude 3.5 Sonnet's accuracy more than doubled on AIME math exams once it began retaining algebraic insights across questions. Similarly, GPT-4o's success rate on the Game of 24 puzzle increased from about " + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "text", + "content": " after the model discovered and reused a Python-based solution. In tasks prone to arithmetic mistakes, such as balancing equations, DC enabled GPT-4o and Claude to reach near-perfect accuracy by recalling previously validated code, whereas their baselines stagnated around " + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "text", + "content": ". Beyond arithmetic challenges, DC yields notable accuracy gains on knowledge-demanding tasks. 
Claude achieved a " + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "text", + "content": " improvement in GPQA-Diamond and an " + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "inline_equation", + "content": "8\\%" + }, + { + "bbox": [ + 71, + 200, + 523, + 427 + ], + "type": "text", + "content": " boost on MMLU-Pro Engineering and Physics problems. Crucially, DC's memory is self-curated, focusing on concise, transferable snippets rather than entire transcripts, thereby facilitating meta-learning and avoiding context ballooning. Unlike fine-tuning or static retrieval methods, DC adapts LMs' problem-solving skills on the fly, without modifying their underlying parameters, and offers a practical approach for continuously refining responses and cutting routine errors. Overall, our findings present DC as a promising approach for augmenting LMs with persistent memory, bridging the divide between isolated inference events and the cumulative, experience-driven learning characteristic of human cognition.*" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 70, + 437, + 293, + 588 + ], + "blocks": [ + { + "bbox": [ + 70, + 437, + 293, + 588 + ], + "lines": [ + { + "bbox": [ + 70, + 437, + 293, + 588 + ], + "spans": [ + { + "bbox": [ + 70, + 437, + 293, + 588 + ], + "type": "image", + "image_path": "504ebbc6428ef94b18208a5e2289adf9074bf5b956cbf3a1b292575deb49ed18.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "lines": [ + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "spans": [ + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "text", + "content": "Figure 1: Comparison of different baselines and Dynamic Cheatsheet (DC) variants on challenging reasoning benchmarks, including AIME exams and GPQA-Diamond. 
Baseline represents a standard prompting approach with minimal guidance, while DC- " + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "text", + "content": " (a stronger baseline) contains explicit structured instructions for problem solving, as well as for Python code generation and execution, but lacks a memory component. Our proposed DC-Cu and DC-RS variants incorporate an evolving, text-based memory to enhance inference-time learning. Results (accuracy, %) demonstrate substantial improvements, with Claude 3.5 Sonnet gaining " + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "inline_equation", + "content": "27\\%" + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "text", + "content": " on AIME 2024 and " + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "text", + "content": " on AIME 2025 under DC-Cu. In Game of 24, GPT-4o leaps from " + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "text", + "content": " (baseline) to " + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "text", + "content": " under DC-RS, reflecting its ability to retain and apply Python-based solutions efficiently. Similarly, Claude 3.5 Sonnet's accuracy more than doubles in Math Equation Solver, reaching " + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "inline_equation", + "content": "98\\%" + }, + { + "bbox": [ + 51, + 597, + 543, + 679 + ], + "type": "text", + "content": ". 
Overall, these findings highlight the impact of test-time learning through controlled memory augmentation and efficient retrieval." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 298, + 437, + 378, + 588 + ], + "blocks": [ + { + "bbox": [ + 298, + 437, + 378, + 588 + ], + "lines": [ + { + "bbox": [ + 298, + 437, + 378, + 588 + ], + "spans": [ + { + "bbox": [ + 298, + 437, + 378, + 588 + ], + "type": "image", + "image_path": "ff3eddb0ac8c9c521f6b81bc1e872f6fcce3319dd81f63ccf9e8e7f3c7dce2e3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 383, + 437, + 526, + 588 + ], + "blocks": [ + { + "bbox": [ + 383, + 437, + 526, + 588 + ], + "lines": [ + { + "bbox": [ + 383, + 437, + 526, + 588 + ], + "spans": [ + { + "bbox": [ + 383, + 437, + 526, + 588 + ], + "type": "image", + "image_path": "dd056ff42f9f749c24e9c33257f16e5ab66dfe66f9fd3b7c310d8c6d9476a377.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.07952v1 [cs.LG] 10 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 694, + 474, + 716 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 694, + 474, + 716 + ], + "spans": [ + { + "bbox": [ + 66, + 694, + 474, + 716 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 66, + 694, + 474, + 716 + ], + "type": "text", + "content": "Stanford University " + }, + { + "bbox": [ + 66, + 694, + 474, + 716 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 66, + 694, + 474, + 716 + ], + "type": 
"text", + "content": "Together AI. " + }, + { + "bbox": [ + 66, + 694, + 474, + 716 + ], + "type": "inline_equation", + "content": "\\boxtimes" + }, + { + "bbox": [ + 66, + 694, + 474, + 716 + ], + "type": "text", + "content": " Correspondence to: msuzgun@stanford.edu and jamesz@stanford.edu. \n*We release all our data, results, and code at http://github.com/suzgunmirac/dynamic-cheatsheet." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 66, + 133, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 66, + 133, + 79 + ], + "spans": [ + { + "bbox": [ + 53, + 66, + 133, + 79 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 87, + 291, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 87, + 291, + 218 + ], + "spans": [ + { + "bbox": [ + 52, + 87, + 291, + 218 + ], + "type": "text", + "content": "Modern large language models (LLMs) can tackle complex reasoning tasks, answer various questions, and generate extensive texts. Yet they still suffer from one critical limitation: once deployed, these models are fixed prior to deployment and typically retain no explicit or implicit memory of past questions, successes, or mistakes during inference. They approach each new problem de novo, often re-deriving the same insights—and re-committing the same errors. In contrast, human cognition stands on a foundation of incremental learning, continuously internalizing new experiences and solutions into a persistent mental model." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 224, + 291, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 224, + 291, + 416 + ], + "spans": [ + { + "bbox": [ + 52, + 224, + 291, + 416 + ], + "type": "text", + "content": "In this work, we present Dynamic Cheatsheet (DC), a simple and intuitive framework that endows black-box LLMs with a persistent, evolving memory at inference time. Rather than fine-tuning weights (for instance, through dynamic evaluation (Krause et al., 2019) or domain adaptation (Gururangan et al., 2020)) or retrieving facts from a massive static corpus (as in traditional retrieval-augmented generation systems (Guu et al., 2020; Zhang et al., 2024b)), DC dynamically curates a compact library of reusable strategies, solution sketches, and code snippets. Either before or after each query, DC enables the system to decide which lessons to store, what to discard, and how to refine existing entries—thus effectively \"learning\" from successes and failures. It is a flexible online-learning approach that enables a black-box LLM to improve itself without needing any explicit ground truth labels or human feedback." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 422, + 291, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 422, + 291, + 589 + ], + "spans": [ + { + "bbox": [ + 52, + 422, + 291, + 589 + ], + "type": "text", + "content": "The overall workflow of DC is intuitive and compelling. In one version of DC (DC-Cu.), when presented with a new query, the LM first consults its external memory to see if any prior insights, strategies or relevant model solutions have been stored. It then proposes a solution by combining the retrieved insights with its own internal reasoning capabilities. 
Upon generating an answer, it then proceeds to a curation phase that updates the memory: If the approach seems to be correct, useful, or practical, DC codifies it in its memory for future use; if an error surfaces, DC may revise or prune faulty heuristics. This all happens without gradient-based parameter updates, so computational overhead remains modest, and compatibility with black-box APIs (e.g., GPT-4 or Claude) is fully preserved. See Figure 4." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "spans": [ + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "text", + "content": "We tested DC across multiple challenging benchmarks and observed that it increases performance and reduces repetitive mistakes. On AIME 2024, Claude 3.5 Sonnet jumped from " + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "inline_equation", + "content": "23\\%" + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "text", + "content": " accuracy, more than doubling its baseline score, by retaining algebraic and combinatorial insights. Likewise, it gained " + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "text", + "content": " accuracy on AIME 2025. Notably, these improvements hold in knowledge-intensive tasks as well. On GPQA-Diamond, which tests specialized domain questions, DC lifted Claude by over " + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "text", + "content": ". 
In MMLU-Pro Engineering and Physics, it provided up to an " + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "inline_equation", + "content": "8\\%" + }, + { + "bbox": [ + 52, + 594, + 291, + 715 + ], + "type": "text", + "content": " boost in" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 308, + 67, + 538, + 249 + ], + "blocks": [ + { + "bbox": [ + 308, + 67, + 538, + 249 + ], + "lines": [ + { + "bbox": [ + 308, + 67, + 538, + 249 + ], + "spans": [ + { + "bbox": [ + 308, + 67, + 538, + 249 + ], + "type": "image", + "image_path": "6ec5d4b82fc2fec50289e6f56bacca63d933fcf0c48267f64fa133ea1a802676.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 256, + 543, + 288 + ], + "lines": [ + { + "bbox": [ + 304, + 256, + 543, + 288 + ], + "spans": [ + { + "bbox": [ + 304, + 256, + 543, + 288 + ], + "type": "text", + "content": "Figure 2: Overall task performance of Claude 3.5 Sonnet under the baseline prompting approach with minimal instructions (BL) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 304, + 544, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 304, + 544, + 330 + ], + "spans": [ + { + "bbox": [ + 304, + 304, + 544, + 330 + ], + "type": "text", + "content": "performance by allowing the model to maintain a \" toolkit\" of formulas and general problem-solving patterns." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 335, + 543, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 335, + 543, + 479 + ], + "spans": [ + { + "bbox": [ + 303, + 335, + 543, + 479 + ], + "type": "text", + "content": "An even more striking and compelling example is the Game of 24, a puzzle that requires the solver to combine four digits into an arithmetic expression equaling 24. 
GPT-4o's baseline performance (10%) increased to 99% under DC. Early in the test sequence, the model discovered that an efficient Python brute-force solver eliminated all manual guesswork. Once this snippet was stored, GPT-4o simply retrieved it for subsequent queries, avoiding manual arithmetic entirely. We saw a similar pattern in Math Equation Balancer, where GPT-4o and Claude soared from 45-50% to 98-100% by \"recalling\" a straightforward code-based approach instead of manually fumbling with numeric manipulations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 484, + 543, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 484, + 543, + 581 + ], + "spans": [ + { + "bbox": [ + 303, + 484, + 543, + 581 + ], + "type": "text", + "content": "Nonetheless, DC is not a panacea. We found that smaller models, such as GPT-4o-mini, benefit from DC in limited amounts. These models generate too few correct solutions in these challenging tasks in the first place, leaving the memory populated with flawed or incomplete strategies. Worse, they struggle to refine stored content. DC can amplify the strengths of models that can already produce high-quality outputs, but not fix foundational gaps in reasoning." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 586, + 544, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 586, + 544, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 586, + 544, + 717 + ], + "type": "text", + "content": "We also note that DC differs from naive \"append the entire conversation history\" in-context learning approaches. Under DC, memory is carefully curated, focusing on succinct, useful, and transferable knowledge over raw transcripts. This prevents ballooning context lengths (Liu et al., 2024a) and helps ensure that repeated retrieval remains tractable. 
Indeed, part of DC's contribution is in formalizing a mechanism for selective, evolving retention—storing just enough to solve the next set of tasks without drowning in an ever-growing text buffer. Cf. (Karpicke & Roediger III, 2008; Roediger & Butler, 2011; Karpicke & Blunt, 2011)" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 64, + 216, + 171 + ], + "blocks": [ + { + "bbox": [ + 53, + 64, + 216, + 171 + ], + "lines": [ + { + "bbox": [ + 53, + 64, + 216, + 171 + ], + "spans": [ + { + "bbox": [ + 53, + 64, + 216, + 171 + ], + "type": "image", + "image_path": "3cf3983f23c859e88d5921b3f4d9b8fd44874ef98f78cea1bad717f13efae90a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 179, + 542, + 222 + ], + "lines": [ + { + "bbox": [ + 51, + 179, + 542, + 222 + ], + "spans": [ + { + "bbox": [ + 51, + 179, + 542, + 222 + ], + "type": "text", + "content": "Figure 3: Algorithmic illustration of the Dynamic Cheatsheet (DC)-based approaches and other baseline methods. Here, Gen represents the solution generator model, Cur the memory curator, and Retr the retriever. 
While we use the same black-box LLMs for both generation and curation, we differentiate their roles via task-agnostic instructions (prompts). The retrieval mechanism ranks historical inputs based on cosine similarity with the current query, selecting the most relevant past examples along with their generated solutions." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 220, + 64, + 377, + 171 + ], + "blocks": [ + { + "bbox": [ + 220, + 64, + 377, + 171 + ], + "lines": [ + { + "bbox": [ + 220, + 64, + 377, + 171 + ], + "spans": [ + { + "bbox": [ + 220, + 64, + 377, + 171 + ], + "type": "image", + "image_path": "c0718f44a9608275d94210fa91cb6ad2b96b9bc7c811b2ceda617f99ef271065.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 384, + 64, + 542, + 171 + ], + "blocks": [ + { + "bbox": [ + 384, + 64, + 542, + 171 + ], + "lines": [ + { + "bbox": [ + 384, + 64, + 542, + 171 + ], + "spans": [ + { + "bbox": [ + 384, + 64, + 542, + 171 + ], + "type": "image", + "image_path": "5d4189c7af6f4ad1886d1376f3f1be4e220294b63d135fd02fa2b265f1888f25.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 227, + 272, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 227, + 272, + 241 + ], + "spans": [ + { + "bbox": [ + 52, + 227, + 272, + 241 + ], + "type": "text", + "content": "2. Dynamic Cheatsheet (DC) Methodology" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 247, + 291, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 247, + 291, + 354 + ], + "spans": [ + { + "bbox": [ + 52, + 247, + 291, + 354 + ], + "type": "text", + "content": "DC, in its core, includes an external, non-parametric memory that evolves in tandem with the LLM's inference process. 
Rather than fine-tuning the underlying weights, DC tracks successes and failures of the model at test time, then selectively stores heuristics, strategies, or short textual artifacts that can guide the LLM in future instances. Notably, this approach respects the black-box nature of many commercial LLM APIs: no gradient-based updates are required, and the model's core parameters remain untouched." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 368, + 243, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 368, + 243, + 380 + ], + "spans": [ + { + "bbox": [ + 52, + 368, + 243, + 380 + ], + "type": "text", + "content": "2.1. DC: Building Blocks and Iterative Loop" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 387, + 291, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 387, + 291, + 422 + ], + "spans": [ + { + "bbox": [ + 52, + 387, + 291, + 422 + ], + "type": "text", + "content": "The DC framework consists of two core modules: generation and curation. Both modules can easily operate on top of the same LM (prompted differently) or on separate LMs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 434, + 227, + 446 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 434, + 227, + 446 + ], + "spans": [ + { + "bbox": [ + 52, + 434, + 227, + 446 + ], + "type": "text", + "content": "2.1.1. 
Solution Generation with Memory" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "spans": [ + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "text", + "content": "Let's consider a sequence of inputs " + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "inline_equation", + "content": "(x_{1},x_{2},\\ldots ,x_{n})" + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "text", + "content": " , where each " + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "inline_equation", + "content": "x_{i}\\sim \\mathcal{D}_{\\mathrm{test}}" + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "text", + "content": " indicates a new query or problem posed to the model sampled from the same distribution " + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{test}}" + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "text", + "content": " (a typical setting in online learning). The distribution " + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{test}}" + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "text", + "content": " is unknown to us. 
At the " + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "text", + "content": " -th step, the model is provided with both the new query " + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "text", + "content": " and the current memory state " + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 52, + 453, + 290, + 548 + ], + "type": "text", + "content": " which captures knowledge gleaned from previous successes and failures. We denote the solution generator by Gen:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 134, + 555, + 290, + 567 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 555, + 290, + 567 + ], + "spans": [ + { + "bbox": [ + 134, + 555, + 290, + 567 + ], + "type": "interline_equation", + "content": "\\tilde {y} _ {i} = \\operatorname {G e n} \\left(x _ {i}, M _ {i}\\right) \\tag {1}", + "image_path": "206eeb3ff21667424a8371866b07de21ec4ae33ce5701128c9f405813680a1b5.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 573, + 291, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 573, + 291, + 609 + ], + "spans": [ + { + "bbox": [ + 52, + 573, + 291, + 609 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 52, + 573, + 291, + 609 + ], + "type": "inline_equation", + "content": "\\tilde{y}_i" + }, + { + "bbox": [ + 52, + 573, + 291, + 609 + ], + "type": "text", + "content": " is the candidate solution produced by the model. 
" + }, + { + "bbox": [ + 52, + 573, + 291, + 609 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 52, + 573, + 291, + 609 + ], + "type": "text", + "content": " helps condition the model to reuse or adapt previously stored solutions, insights, techniques, or heuristics." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 620, + 180, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 620, + 180, + 633 + ], + "spans": [ + { + "bbox": [ + 52, + 620, + 180, + 633 + ], + "type": "text", + "content": "2.1.2. Memory Curation Step" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 639, + 291, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 639, + 291, + 663 + ], + "spans": [ + { + "bbox": [ + 52, + 639, + 291, + 663 + ], + "type": "text", + "content": "After the generator produces its answer " + }, + { + "bbox": [ + 52, + 639, + 291, + 663 + ], + "type": "inline_equation", + "content": "\\tilde{y}_i" + }, + { + "bbox": [ + 52, + 639, + 291, + 663 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 639, + 291, + 663 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 52, + 639, + 291, + 663 + ], + "type": "text", + "content": ", the curator, Cur, updates the current content of the memory:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 669, + 290, + 682 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 669, + 290, + 682 + ], + "spans": [ + { + "bbox": [ + 121, + 669, + 290, + 682 + ], + "type": "interline_equation", + "content": "M _ {i + 1} = \\operatorname {C u r} \\left(M _ {i}, x _ {i}, \\tilde {y} _ {i}\\right) \\tag {2}", + "image_path": "c67680ddd141bf7a0b7bf1fd84af90daa5dd07c8b31a9a66945d58d4ac2b9f56.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 693, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 693, + 
291, + 717 + ], + "spans": [ + { + "bbox": [ + 51, + 693, + 291, + 717 + ], + "type": "text", + "content": "During memory curation, Cur mainly considers: (i) the usefulness and generalizability of the newly produced answer" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 228, + 544, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 228, + 544, + 324 + ], + "spans": [ + { + "bbox": [ + 304, + 228, + 544, + 324 + ], + "type": "text", + "content": "(i.e., if " + }, + { + "bbox": [ + 304, + 228, + 544, + 324 + ], + "type": "inline_equation", + "content": "\\tilde{y}_i" + }, + { + "bbox": [ + 304, + 228, + 544, + 324 + ], + "type": "text", + "content": " is correct or provides valuable and generalizable insights, it is distilled into a form suitable for later reference), (ii) refinement or removal of existing memory entries (i.e., if an existing memory entry was incorrect or superseded by a more efficient or versatile strategy, Cur may remove or update it), and (iii) clarity and compactness of the entire memory (i.e., memory entries are consolidated to retain succinct, high-impact references and heuristics)." + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 308, + 334, + 538, + 434 + ], + "blocks": [ + { + "bbox": [ + 308, + 334, + 538, + 434 + ], + "lines": [ + { + "bbox": [ + 308, + 334, + 538, + 434 + ], + "spans": [ + { + "bbox": [ + 308, + 334, + 538, + 434 + ], + "type": "image", + "image_path": "92f9a34273fa84fe1598a46a5bfd6c72fab0f2c94d9475ea668cfc3b4a151d44.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 439, + 538, + 450 + ], + "lines": [ + { + "bbox": [ + 307, + 439, + 538, + 450 + ], + "spans": [ + { + "bbox": [ + 307, + 439, + 538, + 450 + ], + "type": "text", + "content": "Figure 4: Illustration of Dynamic Cheatsheet (DC-Cu variant)." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 453, + 543, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 453, + 543, + 561 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 543, + 561 + ], + "type": "text", + "content": "Cur does not have access to ground-truth labels; so, it has to assess the correctness and efficiency of the solutions by itself before updating the memory. In our experiments, we instruct a single model to perform this crucial step. Yet, in practice, Cur can be implemented as a series of steps that instruct multiple tools and models, through different prompts, to verify the validity and efficiency of the solution and to transform the raw solution text into even more generalizable, reliable, and efficient strategies, insights, and code snippets." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 566, + 542, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 566, + 542, + 650 + ], + "spans": [ + { + "bbox": [ + 304, + 566, + 542, + 650 + ], + "type": "text", + "content": "We refer to this version of DC above as DC-Cu (short for DC-Cumulative). Under DC-Cu, the system first performs solution generation based on the current memory (Eqn. 1) and then updates the memory (Eqn. 2), by cumulatively expanding and refining the memory items thus far. Unlike DC-RS, which is discussed in the next part, DC-Cu, does not contain a retrieval component, however." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 662, + 495, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 662, + 495, + 675 + ], + "spans": [ + { + "bbox": [ + 304, + 662, + 495, + 675 + ], + "type": "text", + "content": "2.2. 
DC with Retrieval & Synthesis (DC-RS)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 681, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 681, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 304, + 681, + 542, + 717 + ], + "type": "text", + "content": "DC-Cu has two potential drawbacks. First, it updates the memory after processing an input query, rather than refining it before generating a response. This means the model lacks" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 425, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 425, + 57 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 425, + 57 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 291, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 291, + 152 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 291, + 152 + ], + "type": "text", + "content": "the opportunity to incorporate new insights from the current query while reasoning through its solution. Second, DC-Cu does not store or revisit past input-output pairs unless explicitly retained in memory. This omission prevents the model from directly retrieving and leveraging historical responses, which can be particularly valuable in benchmarks covering diverse topics or domains (e.g., GPQA-Diamond)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 157, + 290, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 157, + 290, + 240 + ], + "spans": [ + { + "bbox": [ + 52, + 157, + 290, + 240 + ], + "type": "text", + "content": "To address these issues, DC-RS modifies the sequence of memory updates and introduces a retrieval mechanism, Retr, into the curation process. Retr allows the model to retrieve the most relevant past input-output pairs from its knowledge base. By refining the memory before responding and retrieving prior cases when needed, DC-RS enhances the model's adaptability and reasoning efficiency." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "spans": [ + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": "DC-RS first retrieves" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": " top-" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": " most similar inputs, along with their model-generated outputs, from previously seen examples, which we denote by " + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "R_{i}^{(k)}" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": " (or simply " + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "R_{i}" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": ").2 It then passes these select examples, " + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "R_{i}" + }, + { + "bbox": [ + 
52, + 246, + 291, + 332 + ], + "type": "text", + "content": ", along with the most recent memory content, " + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "M_{i-1}" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": ", to the curator to update the memory, that is to get " + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": ". Finally, it uses the generator to produce " + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "\\tilde{y}_{i}" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": ", given " + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 52, + 246, + 291, + 332 + ], + "type": "text", + "content": ". 
We summarize all these steps below:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 342, + 290, + 356 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 290, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 290, + 356 + ], + "type": "interline_equation", + "content": "R _ {i} = \\operatorname {R e t r} \\left(x _ {i}, \\left\\{\\left(x _ {j}, \\tilde {y} _ {j}\\right) \\right\\} _ {j < i}, k\\right) \\tag {3}", + "image_path": "2e1042422c326caf6fa103143f04c7b7cd213d5d0d13eda4d651f56c3def875c.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 357, + 290, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 290, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 290, + 370 + ], + "type": "interline_equation", + "content": "M _ {i} = \\operatorname {C u r} \\left(M _ {i - 1}, x _ {i}, R _ {i}\\right) \\tag {4}", + "image_path": "e98633cbd3c95e00f68830058420b187b89948e75e360e2901ac5fd7b75024d3.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 372, + 290, + 385 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 372, + 290, + 385 + ], + "spans": [ + { + "bbox": [ + 107, + 372, + 290, + 385 + ], + "type": "interline_equation", + "content": "\\tilde {y} _ {i} = \\operatorname {G e n} \\left(x _ {i}, M _ {i}\\right) \\tag {5}", + "image_path": "3ed04592319cec982ee33286db9f8cbbd38154a630abd830e4b00384da4d9283.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 400, + 113, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 400, + 113, + 411 + ], + "spans": [ + { + "bbox": [ + 52, + 400, + 113, + 411 + ], + "type": "text", + "content": "2.3. 
Baselines" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 418, + 291, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 418, + 291, + 442 + ], + "spans": [ + { + "bbox": [ + 52, + 418, + 291, + 442 + ], + "type": "text", + "content": "To quantify the efficacy of memory-driven test-time learning, we compare DC and its variants to four baselines:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 449, + 291, + 586 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 52, + 449, + 291, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 449, + 291, + 495 + ], + "spans": [ + { + "bbox": [ + 52, + 449, + 291, + 495 + ], + "type": "text", + "content": "(1) Baseline prompting (BL). This plain \"vanilla\" prompting approach, with minimal instructions, simply prompts the model without any iterative memory or retrieval mechanism. It reflects traditional one-off inference." + }, + { + "bbox": [ + 52, + 449, + 291, + 495 + ], + "type": "inline_equation", + "content": "^3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 502, + 290, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 502, + 290, + 586 + ], + "spans": [ + { + "bbox": [ + 52, + 502, + 290, + 586 + ], + "type": "text", + "content": "(2) DC-" + }, + { + "bbox": [ + 52, + 502, + 290, + 586 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 52, + 502, + 290, + 586 + ], + "type": "text", + "content": " (empty memory). To isolate the effect of memory curation, this DC baseline always keeps the memory content effectively empty. 
" + }, + { + "bbox": [ + 52, + 502, + 290, + 586 + ], + "type": "inline_equation", + "content": "^4" + }, + { + "bbox": [ + 52, + 502, + 290, + 586 + ], + "type": "text", + "content": "DC-" + }, + { + "bbox": [ + 52, + 502, + 290, + 586 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 52, + 502, + 290, + 586 + ], + "type": "text", + "content": " allows us to measure how much performance improvement arises purely from storing and reusing knowledge over time. While there is no continuous knowledge storage or strategy reuse, this method follows the instructions in Figure 13 and is therefore a strong baseline." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 67, + 542, + 216 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "type": "text", + "content": "(3) Full-History Appending (FH). This is a naive approach that appends the entire conversation history to the model input without any curation or truncation. FH can exceed context-window limits and include redundant or low-value information, but nonetheless, it provides a useful comparison for methods that actively curate content." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 144, + 542, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 144, + 542, + 216 + ], + "spans": [ + { + "bbox": [ + 304, + 144, + 542, + 216 + ], + "type": "text", + "content": "(4) Dynamic Retrieval (DR). A final baseline uses retrieval but no curation. Specifically, for each new query, it retrieves the most similar past interactions and directly pastes them, verbatim, into the prompt. 
DR can help the model see relevant input-output pairs but not directly codify any abstract or generalized solutions.7" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 223, + 542, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 223, + 542, + 247 + ], + "spans": [ + { + "bbox": [ + 304, + 223, + 542, + 247 + ], + "type": "text", + "content": "Figure 3 (above) contains pseudocodes of all the primary methods and baselines considered in this paper." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 262, + 422, + 276 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 262, + 422, + 276 + ], + "spans": [ + { + "bbox": [ + 304, + 262, + 422, + 276 + ], + "type": "text", + "content": "3. Experimental Setup" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 282, + 406, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 282, + 406, + 293 + ], + "spans": [ + { + "bbox": [ + 304, + 282, + 406, + 293 + ], + "type": "text", + "content": "3.1. Tasks and Datasets" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 303, + 300, + 543, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 300, + 543, + 421 + ], + "spans": [ + { + "bbox": [ + 303, + 300, + 543, + 421 + ], + "type": "text", + "content": "To rigorously evaluate DC's effectiveness, we focus on challenging tasks where contemporary state-of-the-art LLMs, such as GPT-4o and Claude 3.5, still face limitations. Rather than evaluating on benchmarks where performance is near saturation (e.g., BBH (Suzgun et al., 2023b), MGSM (Shi et al., 2023), GSM8K (Cobbe et al., 2021)), we prioritize tasks that demand multi-step reasoning, heuristic search, strategic adaptation, and cumulative learning—that is, tasks in which iterative memory refinement can yield tangible improvements over time." 
+ }, + { + "bbox": [ + 303, + 300, + 543, + 421 + ], + "type": "inline_equation", + "content": "^{8}" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 303, + 426, + 543, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 426, + 543, + 463 + ], + "spans": [ + { + "bbox": [ + 303, + 426, + 543, + 463 + ], + "type": "text", + "content": "Overall, the selected datasets include algorithmic, logical, and domain-specific reasoning tasks, each chosen to stress-test the model's ability to refine its reasoning over time." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 303, + 468, + 543, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 468, + 543, + 564 + ], + "spans": [ + { + "bbox": [ + 303, + 468, + 543, + 564 + ], + "type": "text", + "content": "(a) AIME 2020-2025 Exam Questions: The American Invitational Mathematics Examination (AIME) is a prestigious high-school competition featuring complex problems across algebra, combinatorics, number theory, geometry, and probability. These questions require deep mathematical reasoning and multi-step problem-solving. We consider three subsets: AIME " + }, + { + "bbox": [ + 303, + 468, + 543, + 564 + ], + "type": "inline_equation", + "content": "2024^{9}" + }, + { + "bbox": [ + 303, + 468, + 543, + 564 + ], + "type": "text", + "content": " (30 questions), AIME " + }, + { + "bbox": [ + 303, + 468, + 543, + 564 + ], + "type": "inline_equation", + "content": "2025^{10}" + }, + { + "bbox": [ + 303, + 468, + 543, + 564 + ], + "type": "text", + "content": " (30 questions), and AIME " + }, + { + "bbox": [ + 303, + 468, + 543, + 564 + ], + "type": "inline_equation", + "content": "2020 - 2024^{11}" + }, + { + "bbox": [ + 303, + 468, + 543, + 564 + ], + "type": "text", + "content": " (133 questions)." 
+ } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 425, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 425, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 425, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 593, + 289, + 614 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 593, + 289, + 614 + ], + "spans": [ + { + "bbox": [ + 52, + 593, + 289, + 614 + ], + "type": "text", + "content": "1We used OpenAI's text-embedding-3-small model to map input queries (raw questions) to embedding vectors." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 614, + 289, + 635 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 614, + 289, + 635 + ], + "spans": [ + { + "bbox": [ + 52, + 614, + 289, + 635 + ], + "type": "text", + "content": "2We set " + }, + { + "bbox": [ + 52, + 614, + 289, + 635 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 52, + 614, + 289, + 635 + ], + "type": "text", + "content": " to 3 in all our experiments. (Initially, we considered higher top- " + }, + { + "bbox": [ + 52, + 614, + 289, + 635 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 52, + 614, + 289, + 635 + ], + "type": "text", + "content": " values such as 5 and 7, but the gain was insignificant.)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 635, + 290, + 686 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 635, + 290, + 686 + ], + "spans": [ + { + "bbox": [ + 52, + 635, + 290, + 686 + ], + "type": "text", + "content": "3Please refer to Figure 12 to see the full instruction (prompt) used in BLh. 
We experimented with the zero-shot CoT approach (Kojima et al., 2022) in our preliminary experiments, but it did not yield any gains (Arcuschin et al., 2025). We, therefore, did not include it as a baseline method in our experiments." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 686, + 291, + 716 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 686, + 291, + 716 + ], + "spans": [ + { + "bbox": [ + 52, + 686, + 291, + 716 + ], + "type": "text", + "content": "4We adopt the generator prompt template used in DC-RS, namely Figure 13, for DC- " + }, + { + "bbox": [ + 52, + 686, + 291, + 716 + ], + "type": "inline_equation", + "content": "\\emptyset" + }, + { + "bbox": [ + 52, + 686, + 291, + 716 + ], + "type": "text", + "content": " , though we replace the memory placeholder with the text \"empty cheatsheet)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 303, + 571, + 542, + 602 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 571, + 542, + 602 + ], + "spans": [ + { + "bbox": [ + 303, + 571, + 542, + 602 + ], + "type": "text", + "content": "5We consider and test this baseline only on AIME 2024 and AIME 2025, which are relatively small in their size (each contains 30 examples) compared to other benchmarks." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 303, + 602, + 541, + 633 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 602, + 541, + 633 + ], + "spans": [ + { + "bbox": [ + 303, + 602, + 541, + 633 + ], + "type": "text", + "content": "6We use the generator prompt template in Figure 13 again, but include the entire raw input-output pairs from the previous steps in the memory—without any curation, truncation, or synthesis." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 303, + 633, + 543, + 654 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 633, + 543, + 654 + ], + "spans": [ + { + "bbox": [ + 303, + 633, + 543, + 654 + ], + "type": "inline_equation", + "content": "^{7}\\mathrm{FH}" + }, + { + "bbox": [ + 303, + 633, + 543, + 654 + ], + "type": "text", + "content": " is similar to DR, but we include only a select (most relevant) input-output pairs in the memory content." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 303, + 654, + 542, + 675 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 654, + 542, + 675 + ], + "spans": [ + { + "bbox": [ + 303, + 654, + 542, + 675 + ], + "type": "text", + "content": "8We release all the original input-output pairs in our codebase: http://github.com/suzgunmirac/dynamic-cheatsheet." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 318, + 675, + 535, + 685 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 675, + 535, + 685 + ], + "spans": [ + { + "bbox": [ + 318, + 675, + 535, + 685 + ], + "type": "inline_equation", + "content": "^{9}" + }, + { + "bbox": [ + 318, + 675, + 535, + 685 + ], + "type": "text", + "content": "huggingface.co/datasets/HuggingFaceH4/aime_2024" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 318, + 686, + 523, + 696 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 686, + 523, + 696 + ], + "spans": [ + { + "bbox": [ + 318, + 686, + 523, + 696 + ], + "type": "text", + "content": "10huggingface.co/datasets/yentinglin/aime_2025." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 318, + 696, + 553, + 708 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 696, + 553, + 708 + ], + "spans": [ + { + "bbox": [ + 318, + 696, + 553, + 708 + ], + "type": "text", + "content": "11huggingface.co/datasets/di-zhang-fdu/AIME_1983_2024." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 291, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 291, + 163 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 291, + 163 + ], + "type": "text", + "content": "(b) GPQA-Diamond (Rein et al., 2024): A high-quality, difficult subset of the Graduate-Level Google-Proof Q&A (GPQA) benchmark, GPQA-Diamond contains 198 expert-validated questions across natural sciences, including biology, chemistry, and physics. These questions were correctly answered by domain experts but often missed by non-experts, making them ideal for evaluating DC's ability to handle complex, multi-hop reasoning tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 168, + 291, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 168, + 291, + 277 + ], + "spans": [ + { + "bbox": [ + 52, + 168, + 291, + 277 + ], + "type": "text", + "content": "(c) Game of 24 (Yao et al., 2023; Suzgun & Kalai, 2024): A heuristic-driven arithmetic challenge where the objective is to form an expression that evaluates to 24 using four given numbers exactly once. For instance, if the input values were \"7 7 8 11,\" one valid answer would be \"8*(7+7-11).\" This task emphasizes systematic search, strategic reasoning, and pattern recognition. We use the 100 examples from (Suzgun & Kalai, 2024) to assess DC's capacity for refining computational heuristics and strategy over manual attempts." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 282, + 291, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 282, + 291, + 378 + ], + "spans": [ + { + "bbox": [ + 52, + 282, + 291, + 378 + ], + "type": "text", + "content": "(d) Math Equation Balancer: Focused on elementary arithmetic reasoning, this dataset requires the model to complete equations by inserting the appropriate operators to form valid expressions. The task emphasizes the sequential placement of operators, as illustrated by the example “1 ? 2 ? 3 = 6,” where the model must identify the correct operators to satisfy the equation (“1 + 2 + 3 = 6” or “1 * 2 * 3 = 6”). We compiled 250 arithmetic expressions for this task." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 384, + 291, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 384, + 291, + 456 + ], + "spans": [ + { + "bbox": [ + 52, + 384, + 291, + 456 + ], + "type": "text", + "content": "(e) MMLU-Pro (Engineering and Physics) (Wang et al., 2024b): A professional-level subset of the MMLU benchmark focused on physics and engineering. All questions are presented in a multiple-choice form. The original dataset contains 1,299 physics and 969 engineering questions. We sampled 250 questions from each subset." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 469, + 149, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 469, + 149, + 481 + ], + "spans": [ + { + "bbox": [ + 53, + 469, + 149, + 481 + ], + "type": "text", + "content": "3.2. Language Models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 487, + 291, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 487, + 291, + 559 + ], + "spans": [ + { + "bbox": [ + 52, + 487, + 291, + 559 + ], + "type": "text", + "content": "We evaluate the efficacy of DC across a range of language models. 
Our selection includes both state-of-the-art LLMs such as GPT-4o and Claude 3.5 Sonnet and their smaller-scale counterparts (namely, GPT-4o-mini and Claude 3.5 Haiku), as well as models such as DeepSeek R1 that are designed specifically for reasoning-intensive tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 573, + 159, + 584 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 573, + 159, + 584 + ], + "spans": [ + { + "bbox": [ + 53, + 573, + 159, + 584 + ], + "type": "text", + "content": "3.3. Evaluation Protocol" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 591, + 291, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 591, + 291, + 639 + ], + "spans": [ + { + "bbox": [ + 52, + 591, + 291, + 639 + ], + "type": "text", + "content": "To ensure standardized and reliable evaluation, all models are instructed to format their final answers in a structured, machine-readable format. All model answers are expected to be wrapped in the following XML-style tags:" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 72, + 643, + 144, + 677 + ], + "blocks": [ + { + "bbox": [ + 72, + 643, + 144, + 677 + ], + "lines": [ + { + "bbox": [ + 72, + 643, + 144, + 677 + ], + "spans": [ + { + "bbox": [ + 72, + 643, + 144, + 677 + ], + "type": "text", + "content": " (final answer) " + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 52, + 681, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 681, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 681, + 291, + 717 + ], + "type": "text", + "content": "This explicit format ensures accurate and consistent parsing, eliminating errors arising from extraneous text or ambiguous outputs. 
Once extracted, the final answers are evaluated" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 67, + 529, + 79 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 529, + 79 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 529, + 79 + ], + "type": "text", + "content": "using their corresponding task-specific accuracy metric." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 91, + 409, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 409, + 102 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 409, + 102 + ], + "type": "text", + "content": "3.3.1. Accuracy Metrics" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 110, + 542, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 110, + 542, + 133 + ], + "spans": [ + { + "bbox": [ + 304, + 110, + 542, + 133 + ], + "type": "text", + "content": "Given the diversity of the tasks, we use different accuracy metrics tailored to the specific requirements of each dataset." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 139, + 543, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 139, + 543, + 212 + ], + "spans": [ + { + "bbox": [ + 304, + 139, + 543, + 212 + ], + "type": "text", + "content": "Soft Match (SM) is a lenient metric that considers an answer correct if it matches the ground truth after ignoring minor formatting differences, such as punctuation or whitespace variations. We apply this metric to GPQA-Diamond, and MMLU Pro (Engineering and Physics), in which questions are presented in a multiple-choice format." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 217, + 543, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 217, + 543, + 289 + ], + "spans": [ + { + "bbox": [ + 304, + 217, + 543, + 289 + ], + "type": "text", + "content": "Functionally Correct (FC) is an even more flexible metric that evaluates whether the model's output satisfies the task-specific constraints, even if the exact numeral presentation or formatting differs slightly from the reference solution. We apply this metric to the Game of 24, Math Equation Balancer, and AIME benchmarks." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 304, + 388, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 304, + 388, + 317 + ], + "spans": [ + { + "bbox": [ + 304, + 304, + 388, + 317 + ], + "type": "text", + "content": "4. Main Results" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 325, + 504, + 349 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 325, + 504, + 349 + ], + "spans": [ + { + "bbox": [ + 304, + 325, + 504, + 349 + ], + "type": "text", + "content": "4.1. DC enables test-time learning and reduces repetitive errors" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 303, + 355, + 543, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 355, + 543, + 499 + ], + "spans": [ + { + "bbox": [ + 303, + 355, + 543, + 499 + ], + "type": "text", + "content": "One of the most compelling illustrations of DC's capabilities emerges from the Game of 24 task. As seen in Table 1, GPT-4o's baseline accuracy on this arithmetic puzzle was just " + }, + { + "bbox": [ + 303, + 355, + 543, + 499 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 303, + 355, + 543, + 499 + ], + "type": "text", + "content": ". 
Under DC-RS, its performance increased to " + }, + { + "bbox": [ + 303, + 355, + 543, + 499 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 303, + 355, + 543, + 499 + ], + "type": "text", + "content": ", illustrating DC's capacity for test-time learning and iterative refinement. Early in the task sequence, GPT-4o discovered a reliable, Python-based brute-force method to solve Game of 24 and later on recognized the repetitive structure of the problem. The model then encoded this approach into its memory. Once established, GPT-4o consistently retrieved and applied the more or less same Python solution for subsequent examples, leading to rapid and accurate results." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 303, + 505, + 542, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 505, + 542, + 612 + ], + "spans": [ + { + "bbox": [ + 303, + 505, + 542, + 612 + ], + "type": "text", + "content": "The performance under DC-" + }, + { + "bbox": [ + 303, + 505, + 542, + 612 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 303, + 505, + 542, + 612 + ], + "type": "text", + "content": " (19%) further highlights the positive impact of memory curation and retrieval. DC-" + }, + { + "bbox": [ + 303, + 505, + 542, + 612 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 303, + 505, + 542, + 612 + ], + "type": "text", + "content": " uses the same core generator but keeps the memory empty, thus lacking the mechanism to store and reuse solutions. 
The large gap between 19% (DC-" + }, + { + "bbox": [ + 303, + 505, + 542, + 612 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 303, + 505, + 542, + 612 + ], + "type": "text", + "content": ") and 99% (DC-RS) confirms that effective memory usage, in which past solutions are retrieved and generalized, is the main driver of GPT-4o's transformation from ad-hoc solver to near-perfect performer in Game of 24." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 303, + 618, + 543, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 618, + 543, + 703 + ], + "spans": [ + { + "bbox": [ + 303, + 618, + 543, + 703 + ], + "type": "text", + "content": "In contrast, Claude 3.5 Sonnet showed marginal gain, moving from " + }, + { + "bbox": [ + 303, + 618, + 543, + 703 + ], + "type": "inline_equation", + "content": "12\\%" + }, + { + "bbox": [ + 303, + 618, + 543, + 703 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 303, + 618, + 543, + 703 + ], + "type": "inline_equation", + "content": "14\\%" + }, + { + "bbox": [ + 303, + 618, + 543, + 703 + ], + "type": "text", + "content": ". Despite DC's scaffolding, Claude did not internalize a generalized approach but instead continued to rely on manual arithmetic solutions. This underscores that while DC provides the framework for test-time adaptation, its ultimate success hinges on the model's innate capacity to identify and encode robust, reusable strategies." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 64, + 523, + 201 + ], + "blocks": [ + { + "bbox": [ + 72, + 64, + 523, + 201 + ], + "lines": [ + { + "bbox": [ + 72, + 64, + 523, + 201 + ], + "spans": [ + { + "bbox": [ + 72, + 64, + 523, + 201 + ], + "type": "table", + "html": "
TasksClaude 3.5 SonnetGPT-4o
BLDC-∅DRDC-Cu.DC-RSBLDC-∅DRDC-Cu.DC-RS
AIME 202423.336.743.350.046.720.036.726.736.740.0
AIME 20256.723.323.336.730.06.710.010.016.720.0
AIME 2020–246.730.139.138.440.69.824.124.120.324.8
Game of 2412.010.011.014.014.010.019.06.093.099.0
GPQA Diamond59.660.163.661.168.757.157.155.158.157.1
Math Eqn. Balancer44.856.460.410097.850.088.010010099.2
MMLU Pro Eng.61.257.265.266.867.653.251.648.844.051.2
MMLU Pro Physics74.075.680.477.682.075.670.875.670.475.2
", + "image_path": "b2dd0d0d80362dd89fd243809f6becce8148dccb76a5cfdb8c154a007b7f43d8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 210, + 543, + 271 + ], + "lines": [ + { + "bbox": [ + 51, + 210, + 543, + 271 + ], + "spans": [ + { + "bbox": [ + 51, + 210, + 543, + 271 + ], + "type": "text", + "content": "Table 1: Performance comparison of Dynamic Cheatsheet (DC) variants for Claude 3.5 Sonnet and GPT-4o across multiple benchmarks. BL (Baseline): standard inference without memory; DC-∅ (Empty Memory): includes structured problem-solving and explicit tool-use instructions but no memory retention mechanism; DR (Dynamic Retrieval): uses retrieval but lacks curated memory updates; DC-Cu (Cumulative Memory): iteratively accumulates model solutions but lacks retrieval; and DC-RS (Retrieval & Synthesis): combines retrieval with memory refinement/synthesis. These results highlight substantial accuracy gains under DC: Claude 3.5 Sonnet's AIME 2024 accuracy jumps by " + }, + { + "bbox": [ + 51, + 210, + 543, + 271 + ], + "type": "inline_equation", + "content": "27\\%" + }, + { + "bbox": [ + 51, + 210, + 543, + 271 + ], + "type": "text", + "content": " under DC-Cu, and GPT-4o's Game of 24 accuracy leaps from " + }, + { + "bbox": [ + 51, + 210, + 543, + 271 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 51, + 210, + 543, + 271 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 51, + 210, + 543, + 271 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 51, + 210, + 543, + 271 + ], + "type": "text", + "content": " under DC-RS." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 52, + 278, + 267, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 278, + 267, + 303 + ], + "spans": [ + { + "bbox": [ + 52, + 278, + 267, + 303 + ], + "type": "text", + "content": "4.2. 
DC provides substantial improvements across various challenging reasoning benchmarks" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 309, + 290, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 309, + 290, + 357 + ], + "spans": [ + { + "bbox": [ + 52, + 309, + 290, + 357 + ], + "type": "text", + "content": "Beyond Game of 24, DC yielded significant gains across a range of complex mathematical and algorithmic tasks. See Table 1. The results below illustrate how iterative solution reuse can helpful in complex reasoning problems." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "spans": [ + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "content": "AIME Exam Problems. The AIME exams provided some of the most dramatic improvements under DC. For Claude 3.5 Sonnet, performance on AIME 2020-2024 surged from " + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "inline_equation", + "content": "6.7\\%" + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "inline_equation", + "content": "40.6\\%" + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "content": " under DC-RS. A similar upward trend appeared on AIME 2024 (23.3% to " + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "inline_equation", + "content": "50.0\\%" + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "content": ") and AIME 2025 (6.7% to " + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "inline_equation", + "content": "36.7\\%" + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "content": ") under DC-Cu. 
DC-Cu, where the model curates memory after processing the input and does not involve a retrieval stage, also proved potent in recent exam sets, achieving highest accuracy scores in AIME 2024 and 2025. GPT-4o also showed some noteworthy gains. Its AIME 2024 performance raised from " + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "inline_equation", + "content": "20.0\\%" + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "inline_equation", + "content": "40.0\\%" + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "content": " under DC-RS, while its AIME 2025 score climbed from " + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "inline_equation", + "content": "6.7\\%" + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "inline_equation", + "content": "20.0\\%" + }, + { + "bbox": [ + 52, + 363, + 291, + 531 + ], + "type": "text", + "content": ". These boosts suggest that structured test-time-produced memory can help tackle difficult math problems." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "spans": [ + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "text", + "content": "GPQA-Diamond. 
On GPQA-Diamond, Claude 3.5 Sonnet improved from " + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "inline_equation", + "content": "59.6\\%" + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "inline_equation", + "content": "68.7\\%" + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "text", + "content": " under DC-RS, a robust " + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "inline_equation", + "content": "9.1\\%" + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "text", + "content": " gain purely from test-time adaptation. DR " + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "inline_equation", + "content": "(63.6\\%)" + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "text", + "content": " demonstrated that retrieval alone helps, but the further jump to " + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "inline_equation", + "content": "68.7\\%" + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "text", + "content": " highlights how memory curation and synthesis can yield additional benefits. By contrast, GPT-4o experienced only a slight increase from " + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "inline_equation", + "content": "57.1\\%" + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "inline_equation", + "content": "58.1\\%" + }, + { + "bbox": [ + 52, + 536, + 291, + 693 + ], + "type": "text", + "content": " with DC-RS; our quantitative analysis of the model's outputs and memory showed us that retrieval can, in some cases, introduce confusion, especially if suboptimal examples are recalled. 
This contrast between different models underscores how the success of retrieval-based adaptation partly depends on model-specific generation and curation capabilities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 697, + 291, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 697, + 291, + 710 + ], + "spans": [ + { + "bbox": [ + 52, + 697, + 291, + 710 + ], + "type": "text", + "content": "Math Equation Balancer. As Table 1 shows, the base-" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 278, + 543, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 278, + 543, + 363 + ], + "spans": [ + { + "bbox": [ + 303, + 278, + 543, + 363 + ], + "type": "text", + "content": "line performance for Claude 3.5 Sonnet (44.8%) rose to " + }, + { + "bbox": [ + 303, + 278, + 543, + 363 + ], + "type": "inline_equation", + "content": "98 - 100\\%" + }, + { + "bbox": [ + 303, + 278, + 543, + 363 + ], + "type": "text", + "content": " with DC-RS and DC-Cu, while GPT-4o similarly improved from " + }, + { + "bbox": [ + 303, + 278, + 543, + 363 + ], + "type": "inline_equation", + "content": "50.0\\%" + }, + { + "bbox": [ + 303, + 278, + 543, + 363 + ], + "type": "text", + "content": " to near-perfect accuracy (99-100%). As observed in Game of 24, the models quickly learned an algorithmic or Python-based balancing routine, stored it in external memory, and repeatedly retrieved it, achieving exceptional consistency once the core method was established." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 368, + 544, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 368, + 544, + 499 + ], + "spans": [ + { + "bbox": [ + 303, + 368, + 544, + 499 + ], + "type": "text", + "content": "MMLU-Pro Tasks. For MMLU-Pro Eng. 
and Physics, Claude 3.5 Sonnet exhibited consistent gains, rising by up to " + }, + { + "bbox": [ + 303, + 368, + 544, + 499 + ], + "type": "inline_equation", + "content": "8.0\\%" + }, + { + "bbox": [ + 303, + 368, + 544, + 499 + ], + "type": "text", + "content": " in Physics (from " + }, + { + "bbox": [ + 303, + 368, + 544, + 499 + ], + "type": "inline_equation", + "content": "74\\%" + }, + { + "bbox": [ + 303, + 368, + 544, + 499 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 303, + 368, + 544, + 499 + ], + "type": "inline_equation", + "content": "82\\%" + }, + { + "bbox": [ + 303, + 368, + 544, + 499 + ], + "type": "text", + "content": "). Our examination of the curated memory entries shows that Claude temporarily stored and retrieved compact \"reference guides\" on engineering and physics principles, which might have proved beneficial for thematically similar questions. GPT-4o, on the other hand, observed slight decreases from the baseline on these tasks, suggesting that domain complexity and baseline knowledge gaps may attenuate DC's benefits if curated memory is less reliable or consistent." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 513, + 536, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 513, + 536, + 538 + ], + "spans": [ + { + "bbox": [ + 304, + 513, + 536, + 538 + ], + "type": "text", + "content": "4.3. Memory curation (DC) fosters generalization and provides gains over full-history-appending (FH)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "spans": [ + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "text", + "content": "Whereas FH (full-history) simply appends every previous dialogue turn into the prompt, DC actively filters and synthesizes high-value content. 
As shown in Table 2, Sonnet under FH reached " + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "inline_equation", + "content": "26.7\\%" + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "text", + "content": " accuracy in 2024 questions, while DC-based methods hit " + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "inline_equation", + "content": "50.0\\%" + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "text", + "content": ". Similarly, GPT-4o managed a baseline of " + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "inline_equation", + "content": "20.0\\%" + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "text", + "content": " but fell to " + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "inline_equation", + "content": "6.7\\%" + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "text", + "content": " using FH, in direct contrast to " + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "inline_equation", + "content": "40.0\\%" + }, + { + "bbox": [ + 303, + 544, + 544, + 712 + ], + "type": "text", + "content": " with DC-RS. Excessive uncurated input-output pairs can not only overwhelm the model's context window, dilute crucial insights and hamper retrieval efficiency, but also significantly increase inference costs over time. On the other hand, DC's selective memory curation ensures that problem-solving tips or code snippets remain readily accessible without clutter, thus facilitating more robust and consistent improvements across consecutive queries." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 70, + 283, + 375 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 283, + 375 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 283, + 375 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 283, + 375 + ], + "type": "image", + "image_path": "965700a9c72784f9f8a2105c6bbe3acde1bc883831ff2ac484f5b48008c4be46.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 392, + 291, + 464 + ], + "lines": [ + { + "bbox": [ + 52, + 392, + 291, + 464 + ], + "spans": [ + { + "bbox": [ + 52, + 392, + 291, + 464 + ], + "type": "text", + "content": "Figure 5: Excerpt from GPT-4o's external memory after processing 100 examples from Game of 24 under DC-RS. Early in the test sequence, the model discovered a Python-based brute-force solution, stored it, and subsequently retrieved it for subsequent puzzles. 
This shift to structured code reuse resulted in a dramatic performance increase from " + }, + { + "bbox": [ + 52, + 392, + 291, + 464 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 52, + 392, + 291, + 464 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 392, + 291, + 464 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 52, + 392, + 291, + 464 + ], + "type": "text", + "content": " accuracy, eliminating arithmetic errors and redundant problem-solving efforts." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 478, + 274, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 478, + 274, + 490 + ], + "spans": [ + { + "bbox": [ + 52, + 478, + 274, + 490 + ], + "type": "text", + "content": "4.4. DC fosters efficient tool usage / code generation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 496, + 291, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 496, + 291, + 604 + ], + "spans": [ + { + "bbox": [ + 52, + 496, + 291, + 604 + ], + "type": "text", + "content": "A successful behavior under DC is the LLMs' inclination toward code generation to handle computationally intensive tasks. GPT-4o's near-complete reliance on Python scripts for Game of 24 exemplifies this shift. Rather than performing manual arithmetic repeatedly, GPT-4o recognized that code-based brute force is more systematic. It generated, stored, and iteratively refined a Python function that tested permutations of numbers and operations, allowing it to solve each instance of Game of 24 with high accuracy." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 609, + 293, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 609, + 293, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 609, + 293, + 718 + ], + "type": "text", + "content": "This inclination toward automation illustrates DC's potential to nurture efficient tool-usage: the capacity to recognize when external tools (e.g., Python, symbolic math engines, or dedicated solvers) are more robust than internally verbalized chain-of-thought calculations. While we restricted the scope of tool usage to Python interpreter in this study, future expansions could easily explore a broader suite of tools, potentially amplifying LLM performance in specialized domains such as computational biology or legal research." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 307, + 64, + 541, + 124 + ], + "blocks": [ + { + "bbox": [ + 307, + 64, + 541, + 124 + ], + "lines": [ + { + "bbox": [ + 307, + 64, + 541, + 124 + ], + "spans": [ + { + "bbox": [ + 307, + 64, + 541, + 124 + ], + "type": "table", + "html": "
TasksClaude 3.5 SonnetGPT-4o
BLFHDC-Cu.BLFHDC-RS
AIME 202423.326.750.020.013.340.0
AIME 20256.76.736.76.73.320.0
", + "image_path": "4c761fb056c52ac2dda0586b9ecf8f17216ffcf9839d5398281625731563d48e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 131, + 542, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 131, + 542, + 202 + ], + "spans": [ + { + "bbox": [ + 304, + 131, + 542, + 202 + ], + "type": "text", + "content": "Table 2: Performance breakdown of BL (default baseline), FH (full history), DC-Cu, and DC-RS approaches under AIME 2024 and 2025. FH stores all past queries and outputs, while DC-Cu and DC-RS selectively refine stored memory. Results indicate that targeted memory curation in DC-RS leads to greater accuracy gains compared to full history retention, supporting the need for structured, self-updating knowledge mechanisms." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 211, + 537, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 211, + 537, + 224 + ], + "spans": [ + { + "bbox": [ + 304, + 211, + 537, + 224 + ], + "type": "text", + "content": "4.5. Model scale and capacity impact DC effectiveness" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 229, + 542, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 229, + 542, + 302 + ], + "spans": [ + { + "bbox": [ + 303, + 229, + 542, + 302 + ], + "type": "text", + "content": "Our current results indicate that the effectiveness of DC is strongly tied to the model's scale and underlying generative capacity. While Claude 3.5 Sonnet and GPT-4o showed notable gains across multiple tasks under DC, their smaller counterparts, Claude 3.5 Haiku and GPT-4o-mini, showed more limited and inconsistent gains." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "spans": [ + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "text", + "content": "Table 3, for instance, shows that Claude 3.5 Haiku achieved moderate gains under DC, with its accuracy on AIME 2024 rising from " + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "inline_equation", + "content": "10.0\\%" + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "text", + "content": " (baseline) to " + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "inline_equation", + "content": "36.7\\%" + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "text", + "content": " under DC-Cu. But gains on AIME 2025 were weaker, reaching only " + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "inline_equation", + "content": "13.3\\%" + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "text", + "content": " under DC-" + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "text", + "content": " and DC-Cu. 
Interestingly, GPQA-Diamond saw an improvement from " + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "inline_equation", + "content": "43.4\\%" + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "inline_equation", + "content": "49.0\\%" + }, + { + "bbox": [ + 303, + 307, + 544, + 380 + ], + "type": "text", + "content": " under DC-RS," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 406, + 446, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 406, + 446, + 415 + ], + "spans": [ + { + "bbox": [ + 312, + 406, + 446, + 415 + ], + "type": "text", + "content": "GENERAL META-REASONING STRATEGIES" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 417, + 365, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 417, + 365, + 425 + ], + "spans": [ + { + "bbox": [ + 313, + 417, + 365, + 425 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 430, + 357, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 430, + 357, + 439 + ], + "spans": [ + { + "bbox": [ + 313, + 430, + 357, + 439 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 440, + 494, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 440, + 494, + 448 + ], + "spans": [ + { + "bbox": [ + 313, + 440, + 494, + 448 + ], + "type": "text", + "content": "Systematic Problem Analysis Framework (Reference: Q1-Q20)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 449, + 425, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 425, + 457 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 425, + 457 + ], + "type": "text", + "content": "For complex mathematical problems:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, 
+ 458, + 528, + 567 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 313, + 458, + 425, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 458, + 425, + 467 + ], + "spans": [ + { + "bbox": [ + 313, + 458, + 425, + 467 + ], + "type": "text", + "content": "1. State problem requirements clearly" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 468, + 457, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 468, + 457, + 476 + ], + "spans": [ + { + "bbox": [ + 313, + 468, + 457, + 476 + ], + "type": "text", + "content": "2. List key observations and theorems applicable" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 476, + 424, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 476, + 424, + 485 + ], + "spans": [ + { + "bbox": [ + 313, + 476, + 424, + 485 + ], + "type": "text", + "content": "3. Identify patterns and relationships" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 486, + 433, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 433, + 495 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 433, + 495 + ], + "type": "text", + "content": "4. Break into manageable sub-problems" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 495, + 392, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 495, + 392, + 503 + ], + "spans": [ + { + "bbox": [ + 313, + 495, + 392, + 503 + ], + "type": "text", + "content": "5. Verify against examples" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 504, + 528, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 504, + 528, + 513 + ], + "spans": [ + { + "bbox": [ + 313, + 504, + 528, + 513 + ], + "type": "text", + "content": "6. 
Consider computational approach when analytical solution is complex" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 514, + 507, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 507, + 521 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 507, + 521 + ], + "type": "text", + "content": "7. For grid problems, analyze movement patterns and symmetries" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 522, + 514, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 522, + 514, + 531 + ], + "spans": [ + { + "bbox": [ + 313, + 522, + 514, + 531 + ], + "type": "text", + "content": "8. For combinatorial problems, use appropriate counting techniques" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 532, + 449, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 532, + 449, + 540 + ], + "spans": [ + { + "bbox": [ + 313, + 532, + 449, + 540 + ], + "type": "text", + "content": "9. Implement verification code when possible" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 540, + 430, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 430, + 549 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 430, + 549 + ], + "type": "text", + "content": "10. Consider edge cases and constraints" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 550, + 492, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 550, + 492, + 559 + ], + "spans": [ + { + "bbox": [ + 313, + 550, + 492, + 559 + ], + "type": "text", + "content": "11. 
For grid coloring problems, consider row/column patterns" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 313, + 559, + 359, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 559, + 359, + 567 + ], + "spans": [ + { + "bbox": [ + 313, + 559, + 359, + 567 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 571, + 350, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 571, + 350, + 580 + ], + "spans": [ + { + "bbox": [ + 313, + 571, + 350, + 580 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 313, + 581, + 379, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 581, + 379, + 590 + ], + "spans": [ + { + "bbox": [ + 313, + 581, + 379, + 590 + ], + "type": "text", + "content": "Example application:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 313, + 590, + 451, + 653 + ], + "type": "list", + "angle": 0, + "index": 38, + "blocks": [ + { + "bbox": [ + 313, + 590, + 433, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 590, + 433, + 598 + ], + "spans": [ + { + "bbox": [ + 313, + 590, + 433, + 598 + ], + "type": "text", + "content": "1. Requirements: list all given conditions" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 313, + 599, + 449, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 599, + 449, + 607 + ], + "spans": [ + { + "bbox": [ + 313, + 599, + 449, + 607 + ], + "type": "text", + "content": "2. Observations: identify applicable theorems" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 313, + 608, + 441, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 608, + 441, + 616 + ], + "spans": [ + { + "bbox": [ + 313, + 608, + 441, + 616 + ], + "type": "text", + "content": "3. 
Patterns: look for structural relationships" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 313, + 617, + 414, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 414, + 626 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 414, + 626 + ], + "type": "text", + "content": "4. Sub-problems: break into steps" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 313, + 627, + 422, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 627, + 422, + 635 + ], + "spans": [ + { + "bbox": [ + 313, + 627, + 422, + 635 + ], + "type": "text", + "content": "5. Verification: test against examples" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 313, + 635, + 451, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 635, + 451, + 644 + ], + "spans": [ + { + "bbox": [ + 313, + 635, + 451, + 644 + ], + "type": "text", + "content": "6. Implementation: use Python for verification" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 313, + 645, + 351, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 645, + 351, + 653 + ], + "spans": [ + { + "bbox": [ + 313, + 645, + 351, + 653 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 37 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 658, + 368, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 658, + 368, + 666 + ], + "spans": [ + { + "bbox": [ + 313, + 658, + 368, + 666 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 313, + 666, + 344, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 666, + 344, + 674 + ], + "spans": [ + { + "bbox": [ + 313, + 666, + 344, + 674 + ], + "type": "text", + "content": "Count: 20" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 304, + 687, + 542, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 687, + 542, 
+ 728 + ], + "spans": [ + { + "bbox": [ + 304, + 687, + 542, + 728 + ], + "type": "text", + "content": "Figure 6: Example of Claude 3.5 Sonnet's curated memory after processing 20 AIME 2024 questions under DC-Cu. The memory captures key solution strategies, enables the model to generalize across similar computational problems, and boosts its accuracy." + } + ] + } + ], + "index": 41 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 425, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 425, + 57 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 425, + 57 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 42 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 67, + 294, + 208 + ], + "blocks": [ + { + "bbox": [ + 58, + 67, + 294, + 208 + ], + "lines": [ + { + "bbox": [ + 58, + 67, + 294, + 208 + ], + "spans": [ + { + "bbox": [ + 58, + 67, + 294, + 208 + ], + "type": "image", + "image_path": "929c40871840b64eea8a65bb6e6edf2caf541d593b184d1645fa4b7013b9c21a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 215, + 543, + 257 + ], + "lines": [ + { + "bbox": [ + 51, + 215, + 543, + 257 + ], + "spans": [ + { + "bbox": [ + 51, + 215, + 543, + 257 + ], + "type": "text", + "content": "Figure 7: Cumulative performance progression under DC for GPQA-Diamond (left) and Game of 24 (right). 
In GPQA-Diamond, Claude 3.5 Sonnet steadily improves as it accumulates relevant knowledge snippets (the first few points are noisy because " + }, + { + "bbox": [ + 51, + 215, + 543, + 257 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 51, + 215, + 543, + 257 + ], + "type": "text", + "content": " measures cumulative accuracy). Meanwhile, in Game of 24, GPT-4o rapidly transitions from trial-and-error arithmetic to near-perfect performance once it recognizes and stores a Python-based solution. These trends highlight DC's ability to enhance accuracy via iterative test-time learning." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 299, + 66, + 536, + 209 + ], + "blocks": [ + { + "bbox": [ + 299, + 66, + 536, + 209 + ], + "lines": [ + { + "bbox": [ + 299, + 66, + 536, + 209 + ], + "spans": [ + { + "bbox": [ + 299, + 66, + 536, + 209 + ], + "type": "image", + "image_path": "0410e0cc4f37998875e3a12d1df04dd9e6a45d2a2ddd8cacbd54e80f3efd76b0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 263, + 290, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 263, + 290, + 287 + ], + "spans": [ + { + "bbox": [ + 52, + 263, + 290, + 287 + ], + "type": "text", + "content": "suggesting that retrieval-based adaptation might still provide utility in smaller models." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 65, + 300, + 278, + 443 + ], + "blocks": [ + { + "bbox": [ + 65, + 300, + 278, + 443 + ], + "lines": [ + { + "bbox": [ + 65, + 300, + 278, + 443 + ], + "spans": [ + { + "bbox": [ + 65, + 300, + 278, + 443 + ], + "type": "table", + "html": "
TasksClaude 3.5 Haiku
BLDC-∅DC-Cu.DC-RS
AIME 202410.026.736.730.0
AIME 20250.013.313.310.0
GPQA-Diamond43.441.943.749.0
TasksGPT-4o-mini
BLDC-∅DC-Cu.DC-RS
AIME 202416.720.013.313.3
AIME 202510.013.313.316.7
GPQA-Diamond34.334.333.832.3
", + "image_path": "9d6affcf252b705e810fdd65349c4205d86943abc5b929094e9861ab0e1c47f1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 449, + 290, + 521 + ], + "lines": [ + { + "bbox": [ + 52, + 449, + 290, + 521 + ], + "spans": [ + { + "bbox": [ + 52, + 449, + 290, + 521 + ], + "type": "text", + "content": "Table 3: Performance of Claude 3.5 Haiku and GPT-4o-mini, the smaller counterparts of Claude 3.5 Sonnet and GPT-4o, across AIME (2024, 2025) and GPQA-Diamond. These smaller models struggle to fully leverage DC, suggesting that memory-based adaptation is most effective when the base LM has sufficient generative competence. Performance improvements are more muted, highlighting the dependency of DC on model-scale reasoning ability." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 52, + 525, + 291, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 525, + 291, + 634 + ], + "spans": [ + { + "bbox": [ + 52, + 525, + 291, + 634 + ], + "type": "text", + "content": "That said, GPT-4o-mini (Table 3) showed even smaller gains, with some variants leading to slight declines in performance. On AIME 2024, DC- " + }, + { + "bbox": [ + 52, + 525, + 291, + 634 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 52, + 525, + 291, + 634 + ], + "type": "text", + "content": " provided a " + }, + { + "bbox": [ + 52, + 525, + 291, + 634 + ], + "type": "inline_equation", + "content": "20.0\\%" + }, + { + "bbox": [ + 52, + 525, + 291, + 634 + ], + "type": "text", + "content": " boost, but both DC-Cu and DC-RS performed worse than baseline. AIME 2025 showed a minor improvement, peaking at " + }, + { + "bbox": [ + 52, + 525, + 291, + 634 + ], + "type": "inline_equation", + "content": "16.7\\%" + }, + { + "bbox": [ + 52, + 525, + 291, + 634 + ], + "type": "text", + "content": " under DC-RS. 
On GPQA-Diamond, GPT-4o-mini's performance, however, remained largely stagnant or slightly declined under memory-based adaptation, suggesting that it struggled to leverage stored information effectively." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 639, + 286, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 639, + 286, + 651 + ], + "spans": [ + { + "bbox": [ + 52, + 639, + 286, + 651 + ], + "type": "text", + "content": "These imply two drawbacks of smaller models under DC:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 658, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 658, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 658, + 291, + 717 + ], + "type": "text", + "content": "(a) Generative competence. For DC to be effective, the base model must produce correct solutions with sufficient frequency to populate the memory with high-quality, reusable strategies. Smaller models, such as GPT-4o-mini and Claude 3.5 Haiku, generate correct solutions less reliably," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 263, + 543, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 263, + 543, + 300 + ], + "spans": [ + { + "bbox": [ + 304, + 263, + 543, + 300 + ], + "type": "text", + "content": "leading to a sparse or low-quality memory repository. As a result, iterative refinement stalls because the stored knowledge consists mostly of incorrect or partial attempts." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 304, + 544, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 304, + 544, + 425 + ], + "spans": [ + { + "bbox": [ + 304, + 304, + 544, + 425 + ], + "type": "text", + "content": "(b) Contextual and memory curation limitations. Smaller models struggle with long-context understanding/generation and memory retrieval, leading to inefficient or irrelevant memory usage. 
Unlike their larger counterparts, which can more effectively retrieve and synthesize solutions from stored heuristics, smaller models often fail to retrieve the most relevant past solutions or misapply retrieved knowledge to new problems. This results in inconsistent performance under DC-RS, particularly in tasks requiring complex reasoning or strategic adaptation." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 437, + 542, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 542, + 462 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 542, + 462 + ], + "type": "text", + "content": "4.6. Test-time task similarity and example ordering can amplify DC's overall impact" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 468, + 544, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 468, + 544, + 635 + ], + "spans": [ + { + "bbox": [ + 303, + 468, + 544, + 635 + ], + "type": "text", + "content": "Another central insight is that DC thrives when test examples share structural similarities. In both Game of 24 and Math Equation Balancer, once GPT-4o identified an efficient solution, it reused it consistently for subsequent tasks. Similarly, in AIME, discovering a geometry or combinatorics strategy allowed for easy transfer across questions of analogous structure. Consequently, tasks arranged to present related questions early may accelerate and improve the model's test-time learning. This suggests that curriculum-style learning (Bengio et al., 2009), where simpler or archetypal problems are presented first to build a repository of valid heuristics, may potentially bootstrap performance. Cf. 
(Lopez-Paz & Ranzato, 2017; Zelikman et al., 2022; Chen et al., 2024)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 651, + 507, + 665 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 651, + 507, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 651, + 507, + 665 + ], + "type": "text", + "content": "5. Additional Analyses and Discussions" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 303, + 671, + 542, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 671, + 542, + 708 + ], + "spans": [ + { + "bbox": [ + 303, + 671, + 542, + 708 + ], + "type": "text", + "content": "Reasoning and information efficiency. One key insight is that DC reduces the need to \"reinvent the wheel\" for each query. By encoding and reusing well-established techniques" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 290, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 290, + 128 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 290, + 128 + ], + "type": "text", + "content": "(e.g., Python-based solving for Game of 24), models can bypass repeated rediscovery of the same strategies. 
This significantly cuts down reasoning overhead and token usage in subsequent queries, though the initial cost of discovering a robust approach and curating it remains non-trivial." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "spans": [ + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "text", + "content": "DC performs better than majority voting (MV). To test if DC provides advantages over conventional MV at inference, we also tested Sonnet on AIME 2024 and 2025 using both approaches. MV, which selects the most common answer from three independent generations, yielded no improvements over single-shot inference. As seen in Table 4, on AIME 2024, MV performed identically to the baseline " + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "inline_equation", + "content": "(23.3\\%)" + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "text", + "content": ", while on AIME 2025, it remained at " + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "inline_equation", + "content": "6.7\\%" + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "text", + "content": ", offering no tangible gain. Even with DC-" + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "inline_equation", + "content": "\\emptyset" + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "text", + "content": ", MV slightly underperformed " + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "inline_equation", + "content": "(33.3\\%" + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "text", + "content": " vs. " + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "inline_equation", + "content": "36.7\\%)" + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "text", + "content": ". 
In contrast, DC-Cu outperformed MV, reaching " + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "inline_equation", + "content": "50.0\\%" + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "text", + "content": " on AIME 2024 and " + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "inline_equation", + "content": "36.7\\%" + }, + { + "bbox": [ + 53, + 133, + 291, + 324 + ], + "type": "text", + "content": " on AIME 2025. Unlike MV, which passively aggregates outputs, DC actively refines knowledge over time, eliminating errors and improving solution quality. This confirms that memory-based adaptation is far more effective than simple statistical voting in complex reasoning tasks." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 55, + 335, + 290, + 393 + ], + "blocks": [ + { + "bbox": [ + 55, + 335, + 290, + 393 + ], + "lines": [ + { + "bbox": [ + 55, + 335, + 290, + 393 + ], + "spans": [ + { + "bbox": [ + 55, + 335, + 290, + 393 + ], + "type": "table", + "html": "
TasksClaude 3.5 Sonnet
BLMV(BL)DC-∅MV(DC-∅)DC-Cu.
AIME 202423.323.3336.733.350.0
AIME 20256.76.723.323.336.7
", + "image_path": "a96fc554773f447b0b92412be8f2e3f8819c76f4e33c639b283090006003112c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 400, + 290, + 412 + ], + "lines": [ + { + "bbox": [ + 52, + 400, + 290, + 412 + ], + "spans": [ + { + "bbox": [ + 52, + 400, + 290, + 412 + ], + "type": "text", + "content": "Table 4: Comparison of majority voting (MV) with DC on AIME." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 52, + 423, + 290, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 423, + 290, + 532 + ], + "spans": [ + { + "bbox": [ + 52, + 423, + 290, + 532 + ], + "type": "text", + "content": "Clustering of errors and corrections. Our experiments suggest that errors and their corrections often cluster in a latent embedding space. See Figure 10. Once a model acquires a high-quality heuristic for a cluster of related queries, it can apply this knowledge to tightly embedded neighbors. However, faulty heuristics that slip into memory can be equally amplified. Ensuring that the memory remains \"clean\" thus requires careful curation and, if necessary, pruning to avoid propagating erroneous strategies." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 536, + 290, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 536, + 290, + 657 + ], + "spans": [ + { + "bbox": [ + 52, + 536, + 290, + 657 + ], + "type": "text", + "content": "Transferability of memory content across models. We also observed that larger models, such as Claude 3.5 Sonnet and GPT-4o, can sometimes produce higher-quality strategies that, in principle, could benefit smaller models if the memory is transferred. However, if a smaller model lacks the generative capacity to interpret or refine those strategies correctly, its performance can stall or degrade. In our ablation experiments, we observed mixed results. 
This indicates that memory entries, while helpful, cannot fully compensate for inadequate base capability." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 662, + 290, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 662, + 290, + 698 + ], + "spans": [ + { + "bbox": [ + 52, + 662, + 290, + 698 + ], + "type": "text", + "content": "Long-context generation versus understanding. Most large LLMs excel at processing lengthy inputs but struggle to generate comparably long" + }, + { + "bbox": [ + 52, + 662, + 290, + 698 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 52, + 662, + 290, + 698 + ], + "type": "text", + "content": " and well-organized outputs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 67, + 542, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 67, + 542, + 175 + ], + "spans": [ + { + "bbox": [ + 303, + 67, + 542, + 175 + ], + "type": "text", + "content": "DC's memory curation after each query can demand precise reproduction or modification of prior knowledge. We observed instances where the model merely references or abbreviates the existing memory (e.g., \"Previous content [...] preserved\") instead of explicitly rewriting it. Such truncated memory updates can reduce the quality of stored heuristics over time. Potential solutions include maintaining a structured, external database that the LM can reference without regenerating large swaths of text each time." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 180, + 542, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 180, + 542, + 312 + ], + "spans": [ + { + "bbox": [ + 303, + 180, + 542, + 312 + ], + "type": "text", + "content": "Retrieval bottlenecks and noise. 
While retrieval-based variants (e.g., DC-RS) can substantially improve accuracy, poorly filtered retrieval mechanisms can introduce confusion, particularly when presented with highly diverse or loosely related queries. For example, in our experiments, GPT-4o's performance occasionally dipped in GPQA-Diamond due to suboptimal retrieval choices. This underscores the importance of robust retrieval methods (e.g., dense vector search, advanced ranking algorithms) that can reliably surface higher quality exemplars or heuristics while suppressing irrelevant or contradictory texts." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 318, + 542, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 318, + 542, + 426 + ], + "spans": [ + { + "bbox": [ + 303, + 318, + 542, + 426 + ], + "type": "text", + "content": "Hierarchical and modular memory. As LLM deployments scale, specialized domains may benefit from subdividing or hierarchically organizing memory. For instance, a system could maintain separate curated memories for topics like combinatorics or physics, each updated by a specialized retrieval or curation mechanism. This may reduce the load on a unified memory store and help isolate errors within their respective domains, with the goal of further improving the clarity and reliability of retrieved heuristics." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 432, + 542, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 432, + 542, + 515 + ], + "spans": [ + { + "bbox": [ + 303, + 432, + 542, + 515 + ], + "type": "text", + "content": "Time and token complexity. Although DC requires memory curation after each query, it optimizes efficiency over time by reducing redundant computation and token usage.[13] As the model retrieves and refines solutions, memory maintenance becomes a net gain rather than a cost. 
However, its sequential structure still poses challenges for large-scale parallel or batch tasks requiring independent inference." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 521, + 542, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 521, + 542, + 677 + ], + "spans": [ + { + "bbox": [ + 303, + 521, + 542, + 677 + ], + "type": "text", + "content": "Smaller or more specialized models and R1 experiments. Finally, we note that smaller models, such as GPT-4o-mini, show limited gains under DC, as seen in Table 3. Additional experiments with \"R1\" models such as DeepSeek R1 and o1 similarly showed minimal or inconsistent improvements. In these cases, these models' generative ability appears too restricted to produce reliable strategies for storage or to interpret retrieved heuristics effectively. The solutions were far too verbose and long. Without sufficiently accurate and efficient base solutions, memory curation cannot yield substantial gains. This limitation ties back to the core premise that effective DC demands a capable foundation model to seed and refine the curated knowledge." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 304, + 684, + 541, + 704 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 684, + 541, + 704 + ], + "spans": [ + { + "bbox": [ + 304, + 684, + 541, + 704 + ], + "type": "text", + "content": "13On AIME 2024, Claude Sonnet averaged 370 tokens under BL, 494 under DC-" + }, + { + "bbox": [ + 304, + 684, + 541, + 704 + ], + "type": "inline_equation", + "content": "\\emptyset" + }, + { + "bbox": [ + 304, + 684, + 541, + 704 + ], + "type": "text", + "content": ", 1035 under DC-RS, and 1831 under DC-Cu." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 64, + 704, + 174, + 716 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 704, + 174, + 716 + ], + "spans": [ + { + "bbox": [ + 64, + 704, + 174, + 716 + ], + "type": "text", + "content": "12See, e.g., (Liu et al., 2024b)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 291, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 291, + 186 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 291, + 186 + ], + "type": "text", + "content": "Overall, DC offers a useful and practical framework for continuous, test-time learning in LLMs. 
Our findings emphasize the synergy between model capacity and memory curation, the importance of structural task similarity and retrieval precision, and the benefits of offloading repeated computations to flexible external stores (e.g., Python scripts). At the same time, alternative mechanisms (e.g., specialized sub-memories or adaptive example ordering) and more sophisticated retrieval techniques (e.g., topological clustering) remain promising directions for further research." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 202, + 149, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 149, + 216 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 149, + 216 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 222, + 291, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 222, + 291, + 295 + ], + "spans": [ + { + "bbox": [ + 52, + 222, + 291, + 295 + ], + "type": "text", + "content": "We thank Batu El, Sabri Eyuboglu, Tayfun Gur, Emily Shen, Jake Silberg, Elana Simon, and Kyle Swanson for their helpful comments and suggestions. We also thank the members of the James Zou Lab at Stanford for their feedback in the early stages of this project. Suzgun gratefully acknowledges the support of an HAI-SAP Fellowship." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 310, + 111, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 310, + 111, + 322 + ], + "spans": [ + { + "bbox": [ + 53, + 310, + 111, + 322 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 329, + 291, + 716 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 53, + 329, + 291, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 329, + 291, + 353 + ], + "spans": [ + { + "bbox": [ + 53, + 329, + 291, + 353 + ], + "type": "text", + "content": "Amari, S.-I. Natural gradient works efficiently in learning. Neural computation, 10(2):251-276, 1998." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 366, + 291, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 366, + 291, + 426 + ], + "spans": [ + { + "bbox": [ + 53, + 366, + 291, + 426 + ], + "type": "text", + "content": "Arcuschin, I., Janiak, J., Krzyzanowski, R., Rajamanoharan, S., Nanda, N., and Conmy, A. Chain-of-thought reasoning in the wild is not always faithful. In Workshop on Reasoning and Planning for Large Language Models, 2025. URL https://openreview.net/forum?id=L8094Whth0." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 438, + 291, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 438, + 291, + 487 + ], + "spans": [ + { + "bbox": [ + 53, + 438, + 291, + 487 + ], + "type": "text", + "content": "Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate, and critique through self-reflection. In The Twelfth International Conference on Learning Representations, 2023." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 499, + 291, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 499, + 291, + 546 + ], + "spans": [ + { + "bbox": [ + 53, + 499, + 291, + 546 + ], + "type": "text", + "content": "Bengio, Y., Louradour, J., Collobert, R., and Weston, J. Curriculum learning. In Proceedings of the 26th annual international conference on machine learning, pp. 41-48, 2009." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 561, + 291, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 561, + 291, + 633 + ], + "spans": [ + { + "bbox": [ + 53, + 561, + 291, + 633 + ], + "type": "text", + "content": "Besta, M., Blach, N., Kubicek, A., Gerstenberger, R., Podstawski, M., Gianinazzi, L., Gajda, J., Lehmann, T., Niewiadomski, H., Nczyk, P., et al. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 17682-17690, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 645, + 291, + 716 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 645, + 291, + 716 + ], + "spans": [ + { + "bbox": [ + 53, + 645, + 291, + 716 + ], + "type": "text", + "content": "Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., Van Den Driessche, G. B., Lespiau, J.-B., Damoc, B., Clark, A., et al. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, pp. 2206-2240. PMLR, 2022." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 543, + 717 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 306, + 67, + 543, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 67, + 543, + 102 + ], + "spans": [ + { + "bbox": [ + 306, + 67, + 543, + 102 + ], + "type": "text", + "content": "Bottou, L. and Cun, Y. Large scale online learning. Advances in neural information processing systems, 16, 2003." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 113, + 542, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 113, + 542, + 148 + ], + "spans": [ + { + "bbox": [ + 306, + 113, + 542, + 148 + ], + "type": "text", + "content": "Bottou, L. and Le Cun, Y. On-line learning for very large data sets. Applied stochastic models in business and industry, 21(2):137-151, 2005." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 158, + 542, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 158, + 542, + 205 + ], + "spans": [ + { + "bbox": [ + 306, + 158, + 542, + 205 + ], + "type": "text", + "content": "Boudiaf, M., Mueller, R., Ben Ayed, I., and Bertinetto, L. Parameter-free online test-time adaptation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8344-8353, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 215, + 541, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 215, + 541, + 250 + ], + "spans": [ + { + "bbox": [ + 306, + 215, + 541, + 250 + ], + "type": "text", + "content": "Bulatov, A., Kuratov, Y., and Burtsev, M. Recurrent memory transformer. Advances in Neural Information Processing Systems, 35:11079-11091, 2022." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 259, + 542, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 259, + 542, + 296 + ], + "spans": [ + { + "bbox": [ + 306, + 259, + 542, + 296 + ], + "type": "text", + "content": "Chen, Z., Deng, Y., Yuan, H., Ji, K., and Gu, Q. Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 305, + 542, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 305, + 542, + 364 + ], + "spans": [ + { + "bbox": [ + 306, + 305, + 542, + 364 + ], + "type": "text", + "content": "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 374, + 542, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 374, + 542, + 422 + ], + "spans": [ + { + "bbox": [ + 306, + 374, + 542, + 422 + ], + "type": "text", + "content": "Feng, T., Han, P., Lin, G., Liu, G., and You, J. Thought-retriever: Don't just retrieve raw data, retrieve thoughts, 2024. URL https://openreview.net/forum?id=SkDNQbMQba." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 431, + 542, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 431, + 542, + 479 + ], + "spans": [ + { + "bbox": [ + 306, + 431, + 542, + 479 + ], + "type": "text", + "content": "Feng, Y., Li, F., Song, Z., Zheng, B., and Koehn, P. Learn to remember: Transformer with recurrent memory for document-level machine translation. arXiv preprint arXiv:2205.01546, 2022." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 488, + 542, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 488, + 542, + 536 + ], + "spans": [ + { + "bbox": [ + 306, + 488, + 542, + 536 + ], + "type": "text", + "content": "Golovneva, O., O'Brien, S., Pasunuru, R., Wang, T., Zettlemoyer, L., Fazel-Zarandi, M., and Celikyilmaz, A. Pathfinder: Guided search over multi-step reasoning paths. arXiv preprint arXiv:2312.05180, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 545, + 542, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 545, + 542, + 593 + ], + "spans": [ + { + "bbox": [ + 306, + 545, + 542, + 593 + ], + "type": "text", + "content": "Gou, Z., Shao, Z., Gong, Y., Shen, Y., Yang, Y., Duan, N., and Chen, W. Critic: Large language models can self-correct with tool-interactive critiquing. arXiv preprint arXiv:2305.11738, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 603, + 541, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 603, + 541, + 627 + ], + "spans": [ + { + "bbox": [ + 306, + 603, + 541, + 627 + ], + "type": "text", + "content": "Graves, A. Generating sequences with recurrent neural networks. arXiv preprint arXiv:1308.0850, 2013." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 636, + 541, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 636, + 541, + 660 + ], + "spans": [ + { + "bbox": [ + 306, + 636, + 541, + 660 + ], + "type": "text", + "content": "Graves, A., Wayne, G., and Danihelka, I. Neural Turing machines. arXiv preprint arXiv:1410.5401, 2014." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 670, + 542, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 542, + 717 + ], + "type": "text", + "content": "Gururangan, S., Marasovic, A., Swayamdipta, S., Lo, K., Beltagy, I., Downey, D., and Smith, N. A. Don't stop pretraining: Adapt language models to domains and tasks. arXiv preprint arXiv:2004.10964, 2020." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 716 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 291, + 114 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 291, + 114 + ], + "type": "text", + "content": "Guu, K., Lee, K., Tung, Z., Pasupat, P., and Chang, M. Retrieval augmented language model pre-training. In International conference on machine learning, pp. 3929-3938. PMLR, 2020." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 122, + 291, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 122, + 291, + 170 + ], + "spans": [ + { + "bbox": [ + 53, + 122, + 291, + 170 + ], + "type": "text", + "content": "He, Z., Karlinsky, L., Kim, D., McAuley, J., Krotov, D., and Feris, R. Camelot: Towards large language models with training-free consolidated associative memory. arXiv preprint arXiv:2402.13449, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 177, + 291, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 177, + 291, + 213 + ], + "spans": [ + { + "bbox": [ + 53, + 177, + 291, + 213 + ], + "type": "text", + "content": "Joulin, A. and Mikolov, T. Inferring algorithmic patterns with stack-augmented recurrent nets. Advances in neural information processing systems, 28, 2015." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 220, + 291, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 220, + 291, + 255 + ], + "spans": [ + { + "bbox": [ + 53, + 220, + 291, + 255 + ], + "type": "text", + "content": "Karpicke, J. D. and Blunt, J. R. Retrieval practice produces more learning than elaborative studying with concept mapping. Science, 331(6018):772-775, 2011." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 262, + 291, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 262, + 291, + 297 + ], + "spans": [ + { + "bbox": [ + 53, + 262, + 291, + 297 + ], + "type": "text", + "content": "Karpicke, J. D. and Roediger III, H. L. The critical importance of retrieval for learning. science, 319(5865): 966-968, 2008." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 305, + 291, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 305, + 291, + 353 + ], + "spans": [ + { + "bbox": [ + 53, + 305, + 291, + 353 + ], + "type": "text", + "content": "Karpukhin, V., Oguz, B., Min, S., Lewis, P. S., Wu, L., Edunov, S., Chen, D., and Yih, W.-t. Dense passage retrieval for open-domain question answering. In EMNLP (1), pp. 6769-6781, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 360, + 291, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 360, + 291, + 419 + ], + "spans": [ + { + "bbox": [ + 53, + 360, + 291, + 419 + ], + "type": "text", + "content": "Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., and Lewis, M. Generalization through memorization: Nearest neighbor language models. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=Hk1BjCEKvH." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 426, + 291, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 426, + 291, + 474 + ], + "spans": [ + { + "bbox": [ + 53, + 426, + 291, + 474 + ], + "type": "text", + "content": "Kojima, T., Gu, S. S., Reid, M., Matsuo, Y., and Iwasawa, Y. Large language models are zero-shot reasoners. Advances in neural information processing systems, 35: 22199-22213, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 481, + 291, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 481, + 291, + 517 + ], + "spans": [ + { + "bbox": [ + 53, + 481, + 291, + 517 + ], + "type": "text", + "content": "Krause, B., Kahembwe, E., Murray, I., and Renals, S. Dynamic evaluation of transformer language models. arXiv preprint arXiv:1904.08378, 2019." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 524, + 291, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 524, + 291, + 582 + ], + "spans": [ + { + "bbox": [ + 53, + 524, + 291, + 582 + ], + "type": "text", + "content": "Lazaridou, A., Gribovskaya, E., Stokowiec, W. J., and Grigorev, N. Internet-augmented language models through few-shot prompting for open-domain question answering, 2023. URL https://openreview.net/forum?id=hFCUPkSSRE." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 590, + 291, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 590, + 291, + 650 + ], + "spans": [ + { + "bbox": [ + 53, + 590, + 291, + 650 + ], + "type": "text", + "content": "Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., Kuttler, H., Lewis, M., Yih, W.-t., Rocktaschel, T., et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in neural information processing systems, 33:9459-9474, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 657, + 291, + 716 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 657, + 291, + 716 + ], + "spans": [ + { + "bbox": [ + 53, + 657, + 291, + 716 + ], + "type": "text", + "content": "Liu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost in the middle: How language models use long contexts. Transactions of the Association for Computational Linguistics, 12:157-173, 2024a." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 305, + 67, + 542, + 716 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 306, + 67, + 542, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 67, + 542, + 102 + ], + "spans": [ + { + "bbox": [ + 306, + 67, + 542, + 102 + ], + "type": "text", + "content": "Liu, X., Dong, P., Hu, X., and Chu, X. 
Longgenbench: Long-context generation benchmark. arXiv preprint arXiv:2410.04199, 2024b." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 112, + 542, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 112, + 542, + 160 + ], + "spans": [ + { + "bbox": [ + 305, + 112, + 542, + 160 + ], + "type": "text", + "content": "Liu, Y., Kothari, P., Van Delft, B., Bellot-Gurlet, B., Mordan, T., and Alahi, A. Ttt++: When does self-supervised test-time training fail or thrive? Advances in Neural Information Processing Systems, 34:21808-21820, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 169, + 542, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 169, + 542, + 193 + ], + "spans": [ + { + "bbox": [ + 305, + 169, + 542, + 193 + ], + "type": "text", + "content": "Long, J. Large language model guided tree-of-thought. arXiv preprint arXiv:2305.08291, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 202, + 542, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 202, + 542, + 238 + ], + "spans": [ + { + "bbox": [ + 305, + 202, + 542, + 238 + ], + "type": "text", + "content": "Lopez-Paz, D. and Ranzato, M. Gradient episodic memory for continual learning. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 247, + 542, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 247, + 542, + 306 + ], + "spans": [ + { + "bbox": [ + 305, + 247, + 542, + 306 + ], + "type": "text", + "content": "Lu, P., Peng, B., Cheng, H., Galley, M., Chang, K.-W., Wu, Y. N., Zhu, S.-C., and Gao, J. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36: 43447-43478, 2023." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 316, + 542, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 316, + 542, + 375 + ], + "spans": [ + { + "bbox": [ + 305, + 316, + 542, + 375 + ], + "type": "text", + "content": "Madaan, A., Tandon, N., Clark, P., and Yang, Y. Memory-assisted prompt editing to improve gpt-3 after deployment. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 2833–2861, 2022." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 384, + 542, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 384, + 542, + 444 + ], + "spans": [ + { + "bbox": [ + 305, + 384, + 542, + 444 + ], + "type": "text", + "content": "Madaan, A., Tandon, N., Gupta, P., Hallinan, S., Gao, L., Wegreffe, S., Alon, U., Dziri, N., Prabhumoye, S., Yang, Y., et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 453, + 542, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 453, + 542, + 502 + ], + "spans": [ + { + "bbox": [ + 305, + 453, + 542, + 502 + ], + "type": "text", + "content": "McCloskey, M. and Cohen, N. J. Catastrophic interference in connectionist networks: The sequential learning problem. In Psychology of learning and motivation, volume 24, pp. 109-165. Elsevier, 1989." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 510, + 542, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 510, + 542, + 557 + ], + "spans": [ + { + "bbox": [ + 305, + 510, + 542, + 557 + ], + "type": "text", + "content": "Mikolov, T., Karafiát, M., Burget, L., Cernocký, J., and Khudanpur, S. Recurrent neural network based language model. In *Interspeech*, volume 2, pp. 1045–1048. Makuhari, 2010." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 567, + 542, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 567, + 542, + 603 + ], + "spans": [ + { + "bbox": [ + 305, + 567, + 542, + 603 + ], + "type": "text", + "content": "Munkhdalai, T., Sordoni, A., Wang, T., and Trischler, A. Metalearned neural memory. Advances in Neural Information Processing Systems, 32, 2019." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 612, + 542, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 612, + 542, + 660 + ], + "spans": [ + { + "bbox": [ + 305, + 612, + 542, + 660 + ], + "type": "text", + "content": "Niu, S., Wu, J., Zhang, Y., Chen, Y., Zheng, S., Zhao, P., and Tan, M. Efficient test-time model adaptation without forgetting. In International conference on machine learning, pp. 16888-16905. PMLR, 2022." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 305, + 669, + 542, + 716 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 669, + 542, + 716 + ], + "spans": [ + { + "bbox": [ + 305, + 669, + 542, + 716 + ], + "type": "text", + "content": "Qin, Y., Liang, S., Ye, Y., Zhu, K., Yan, L., Lu, Y., Lin, Y., Cong, X., Tang, X., Qian, B., et al. Toollm: Facilitating large language models to master 16000+ real-world apis. arXiv preprint arXiv:2307.16789, 2023." 
+ } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 293, + 718 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 53, + 67, + 293, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 293, + 128 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 293, + 128 + ], + "type": "text", + "content": "Rannen-Triki, A., Bornschein, J., Pascanu, R., Hutter, M., György, A., Galashov, A., Teh, Y. W., and Titsias, M. K. Revisiting dynamic evaluation: Online adaptation for large language models. arXiv preprint arXiv:2403.01518, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 133, + 293, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 133, + 293, + 196 + ], + "spans": [ + { + "bbox": [ + 53, + 133, + 293, + 196 + ], + "type": "text", + "content": "Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J., and Bowman, S. R. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=Ti67584b98." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 202, + 292, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 292, + 239 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 292, + 239 + ], + "type": "text", + "content": "Roediger, H. L. and Butler, A. C. The critical role of retrieval practice in long-term retention. Trends in cognitive sciences, 15(1):20-27, 2011." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 246, + 291, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 246, + 291, + 308 + ], + "spans": [ + { + "bbox": [ + 53, + 246, + 291, + 308 + ], + "type": "text", + "content": "Schick, T., Dwivedi-Yu, J., Dessi, R., Raileanu, R., Lomeli, M., Hambro, E., Zettlemoyer, L., Cancedda, N., and Scialom, T. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36:68539-68551, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 314, + 291, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 314, + 291, + 376 + ], + "spans": [ + { + "bbox": [ + 53, + 314, + 291, + 376 + ], + "type": "text", + "content": "Shen, Y., Song, K., Tan, X., Li, D., Lu, W., and Zhuang, Y. HuggingGPT: Solving AI tasks with chatGPT and its friends in hugging face. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=yHdTscY6Ci." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 381, + 291, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 381, + 291, + 442 + ], + "spans": [ + { + "bbox": [ + 53, + 381, + 291, + 442 + ], + "type": "text", + "content": "Shi, F., Fried, D., Ghazvininejad, M., Zettlemoyer, L., and Wang, S. I. Natural language to code translation with execution. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 3533-3546, 2022." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 449, + 291, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 449, + 291, + 523 + ], + "spans": [ + { + "bbox": [ + 53, + 449, + 291, + 523 + ], + "type": "text", + "content": "Shi, F., Suzgun, M., Freitag, M., Wang, X., Srivats, S., Vosoughi, S., Chung, H. W., Tay, Y., Ruder, S., Zhou, D., Das, D., and Wei, J. Language models are multilingual chain-of-thought reasoners. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=fR3wGck-IXp." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 529, + 293, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 529, + 293, + 662 + ], + "spans": [ + { + "bbox": [ + 53, + 529, + 293, + 662 + ], + "type": "text", + "content": "Shi, W., Min, S., Yasunaga, M., Seo, M., James, R., Lewis, M., Zettlemoyer, L., and Yih, W.-t. REPLUG: Retrievalaugmented black-box language models. In Duh, K., Gomez, H., and Bethard, S. (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 8371-8384, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.nacl-long.463. URL https://aclanthology.org/2024.nacl-long.463/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 669, + 292, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 669, + 292, + 718 + ], + "spans": [ + { + "bbox": [ + 53, + 669, + 292, + 718 + ], + "type": "text", + "content": "Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K., and Yao, S. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36:8634-8652, 2023." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 543, + 718 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 306, + 67, + 543, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 67, + 543, + 117 + ], + "spans": [ + { + "bbox": [ + 306, + 67, + 543, + 117 + ], + "type": "text", + "content": "Sun, Y., Wang, X., Liu, Z., Miller, J., Efros, A., and Hardt, M. Test-time training with self-supervision for generalization under distribution shifts. In International conference on machine learning, pp. 9229-9248. PMLR, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 125, + 543, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 125, + 543, + 174 + ], + "spans": [ + { + "bbox": [ + 306, + 125, + 543, + 174 + ], + "type": "text", + "content": "Sun, Y., Li, X., Dalal, K., Xu, J., Vikram, A., Zhang, G., Dubois, Y., Chen, X., Wang, X., Koyejo, S., et al. Learning to (learn at test time): Rnns with expressive hidden states. arXiv preprint arXiv:2407.04620, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 182, + 543, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 182, + 543, + 232 + ], + "spans": [ + { + "bbox": [ + 306, + 182, + 543, + 232 + ], + "type": "text", + "content": "Surís, D., Menon, S., and Vondrick, C. Vipergpt: Visual inference via python execution for reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 11888-11898, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 240, + 543, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 240, + 543, + 277 + ], + "spans": [ + { + "bbox": [ + 306, + 240, + 543, + 277 + ], + "type": "text", + "content": "Suzgun, M. and Kalai, A. T. Meta-prompting: Enhancing language models with task-agnostic scaffolding. 
arXiv preprint arXiv:2401.12954, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 286, + 543, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 286, + 543, + 334 + ], + "spans": [ + { + "bbox": [ + 306, + 286, + 543, + 334 + ], + "type": "text", + "content": "Suzgun, M., Gehrmann, S., Belinkov, Y., and Shieber, S. M. Memory-augmented recurrent neural networks can learn generalized dyck languages. arXiv preprint arXiv:1911.03329, 2019." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 343, + 543, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 343, + 543, + 404 + ], + "spans": [ + { + "bbox": [ + 306, + 343, + 543, + 404 + ], + "type": "text", + "content": "Suzgun, M., Melas-Kyriazi, L., and Jurafsky, D. Follow the wisdom of the crowd: Effective text generation via minimum bayes risk decoding. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 4265-4293, 2023a." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 414, + 543, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 414, + 543, + 485 + ], + "spans": [ + { + "bbox": [ + 306, + 414, + 543, + 485 + ], + "type": "text", + "content": "Suzgun, M., Scales, N., Scharli, N., Gehrmann, S., Tay, Y., Chung, H. W., Chowdhery, A., Le, Q., Chi, E., Zhou, D., et al. Challenging big-bench tasks and whether chain-of-thought can solve them. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 13003-13051, 2023b." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 495, + 543, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 495, + 543, + 556 + ], + "spans": [ + { + "bbox": [ + 306, + 495, + 543, + 556 + ], + "type": "text", + "content": "Suzgun, M., Shieber, S. M., and Jurafsky, D. string2string: A modern python library for string-to-string algorithms. 
In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pp. 278-285, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 565, + 543, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 565, + 543, + 625 + ], + "spans": [ + { + "bbox": [ + 306, + 565, + 543, + 625 + ], + "type": "text", + "content": "Syed, N. A., Liu, H., and Sung, K. K. Handling concept drifts in incremental learning with support vector machines. In Proceedings of the fifth ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 317-321, 1999." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 635, + 543, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 635, + 543, + 660 + ], + "spans": [ + { + "bbox": [ + 306, + 635, + 543, + 660 + ], + "type": "text", + "content": "Thrun, S. and Mitchell, T. M. Lifelong robot learning. Robotics and autonomous systems, 15(1-2):25-46, 1995." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 669, + 543, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 669, + 543, + 718 + ], + "spans": [ + { + "bbox": [ + 306, + 669, + 543, + 718 + ], + "type": "text", + "content": "Vu, T., Iyyer, M., Wang, X., Constant, N., Wei, J., Wei, J., Tar, C., Sung, Y.-H., Zhou, D., Le, Q., et al. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214, 2023." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 717 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 291, + 102 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 291, + 102 + ], + "type": "text", + "content": "Wang, D., Shelhamer, E., Liu, S., Olshausen, B., and Darrell, T. Tent: Fully test-time adaptation by entropy minimization. arXiv preprint arXiv:2006.10726, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 110, + 291, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 110, + 291, + 182 + ], + "spans": [ + { + "bbox": [ + 53, + 110, + 291, + 182 + ], + "type": "text", + "content": "Wang, X., Wei, J., Schuurmans, D., Le, Q. V., Chi, E. H., Narang, S., Chowdhery, A., and Zhou, D. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1PL1NIMMrw." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 190, + 291, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 190, + 291, + 237 + ], + "spans": [ + { + "bbox": [ + 53, + 190, + 291, + 237 + ], + "type": "text", + "content": "Wang, Y., Gao, Y., Chen, X., Jiang, H., Li, S., Yang, J., Yin, Q., Li, Z., Li, X., Yin, B., et al. Memoryllm: Towards self-updatable large language models. arXiv preprint arXiv:2402.04624, 2024a." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 245, + 291, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 245, + 291, + 341 + ], + "spans": [ + { + "bbox": [ + 53, + 245, + 291, + 341 + ], + "type": "text", + "content": "Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., Li, T., Ku, M., Wang, K., Zhuang, A., Fan, R., Yue, X., and Chen, W. MMLU-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024b. URL https://openreview.net/forum?id=y10DM6R2r3." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 348, + 291, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 348, + 291, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 348, + 291, + 407 + ], + "type": "text", + "content": "Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 416, + 291, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 416, + 291, + 440 + ], + "spans": [ + { + "bbox": [ + 53, + 416, + 291, + 440 + ], + "type": "text", + "content": "Weston, J., Chopra, S., and Bordes, A. Memory networks. 
arXiv preprint arXiv:1410.3916, 2014." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 448, + 291, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 448, + 291, + 507 + ], + "spans": [ + { + "bbox": [ + 53, + 448, + 291, + 507 + ], + "type": "text", + "content": "Yang, L., Yu, Z., Zhang, T., Cao, S., Xu, M., Zhang, W., Gonzalez, J. E., and Cui, B. Buffer of thoughts: Thought-augmented reasoning with large language models. Advances in Neural Information Processing Systems, 37: 113519-113544, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 515, + 291, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 515, + 291, + 551 + ], + "spans": [ + { + "bbox": [ + 53, + 515, + 291, + 551 + ], + "type": "text", + "content": "Yao, S., Yu, D., Zhao, J., Shafran, I., Griffiths, T. L., Cao, Y., and Narasimhan, K. Tree of Thoughts: Deliberate problem solving with large language models, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 559, + 291, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 559, + 291, + 605 + ], + "spans": [ + { + "bbox": [ + 53, + 559, + 291, + 605 + ], + "type": "text", + "content": "Yuksekgonul, M., Bianchi, F., Boen, J., Liu, S., Lu, P., Huang, Z., Guestrin, C., and Zou, J. Optimizing generative ai by backpropagating language model feedback. Nature, 639:609-616, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 613, + 291, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 613, + 291, + 650 + ], + "spans": [ + { + "bbox": [ + 53, + 613, + 291, + 650 + ], + "type": "text", + "content": "Zelikman, E., Wu, Y., Mu, J., and Goodman, N. Star: Bootstrapping reasoning with reasoning. Advances in Neural Information Processing Systems, 35:15476-15488, 2022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 657, + 291, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 657, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 53, + 657, + 291, + 717 + ], + "type": "text", + "content": "Zhang, K., Kang, Y., Zhao, F., and Liu, X. LLM-based medical assistant personalization with short- and long-term memory coordination. In Duh, K., Gomez, H., and Bethard, S. (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 305, + 67, + 542, + 421 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 315, + 67, + 542, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 67, + 542, + 127 + ], + "spans": [ + { + "bbox": [ + 315, + 67, + 542, + 127 + ], + "type": "text", + "content": "Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 2386-2398, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.naac1-long.132. URL https://aclanthology.org/2024.naac1-long.132/." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 135, + 542, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 135, + 542, + 182 + ], + "spans": [ + { + "bbox": [ + 305, + 135, + 542, + 182 + ], + "type": "text", + "content": "Zhang, M., Levine, S., and Finn, C. Memo: Test time robustness via adaptation and augmentation. Advances in neural information processing systems, 35:38629-38642, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 190, + 542, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 190, + 542, + 250 + ], + "spans": [ + { + "bbox": [ + 305, + 190, + 542, + 250 + ], + "type": "text", + "content": "Zhang, T., Patil, S. 
G., Jain, N., Shen, S., Zaharia, M., Stoica, I., and Gonzalez, J. E. RAFT: Adapting language model to domain specific RAG. In First Conference on Language Modeling, 2024b. URL https://openreview.net/forum?id=rzQGHXNReU." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 258, + 542, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 258, + 542, + 354 + ], + "spans": [ + { + "bbox": [ + 305, + 258, + 542, + 354 + ], + "type": "text", + "content": "Zhong, Z., Lei, T., and Chen, D. Training language models with memory augmentation. In Goldberg, Y., Kozareva, Z., and Zhang, Y. (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 5657-5673, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.382. URL https://aclanthology.org/2022.emnlp-main.382/." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 361, + 542, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 361, + 542, + 421 + ], + "spans": [ + { + "bbox": [ + 305, + 361, + 542, + 421 + ], + "type": "text", + "content": "Zhou, D., Scharli, N., Hou, L., Wei, J., Scales, N., Wang, X., Schuurmans, D., Cui, C., Bousquet, O., Le, Q., et al. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625, 2022." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 57 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 293, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 293, + 731, + 302, + 740 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 54, + 66, + 220, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 66, + 220, + 79 + ], + "spans": [ + { + "bbox": [ + 54, + 66, + 220, + 79 + ], + "type": "text", + "content": "A. Background & Related Work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 87, + 225, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 87, + 225, + 99 + ], + "spans": [ + { + "bbox": [ + 54, + 87, + 225, + 99 + ], + "type": "text", + "content": "A.1. Test-time learning (online learning)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 106, + 290, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 106, + 290, + 355 + ], + "spans": [ + { + "bbox": [ + 54, + 106, + 290, + 355 + ], + "type": "text", + "content": "Test-time learning—also referred to as online or incremental learning (adaptation)—encompasses a family of methods in which a stochastic model updates its predictions by incorporating information seen during inference, without undergoing conventional, full-scale offline finetuning. 
Early versions of test-time adaptation focused on local or transductive learning, where a model re-fit or re-weighted its parameters with each new test instance or batch (McCloskey & Cohen, 1989; Thrun & Mitchell, 1995; Amari, 1998; Syed et al., 1999; Bottou & Cun, 2003; Bottou & Le Cun, 2005, inter alia). In computer vision, for example, methods like test-time training have been shown to mitigate domain shifts by optimizing a self-supervised loss on incoming data (Wang et al., 2020; Sun et al., 2020; Liu et al., 2021; Boudiaf et al., 2022; Niu et al., 2022; Zhang et al., 2022; Sun et al., 2024). In the context of natural-language generation, test-time adaptation has appeared under terms such as \"dynamic evaluation\" (Mikolov et al., 2010; Graves, 2013; Krause et al., 2019; Rannen-Triki et al., 2024), in which a language model is updated with gradient steps on the test-time data itself." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 363, + 290, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 363, + 290, + 529 + ], + "spans": [ + { + "bbox": [ + 54, + 363, + 290, + 529 + ], + "type": "text", + "content": "However, directly updating language model weights at test time can be computationally expensive and requires the capacity to modify parameters. For large-scale, black-box APIs (e.g., GPT-3 or Claude), one often lacks the ability to perform parameter updates easily, thereby making such an approach difficult, if not completely infeasible (Shi et al., 2024). To address this, a growing body of work has explored parameter-free adaptation, whereby one structurally modifies immediate model inputs (e.g., prompting) or draws from external memory to \"update\" the model's effective reasoning. Our approach aligns with this direction by allowing an LM to iteratively record solutions, explanations, or heuristics in an external memory component over successive interactions, avoiding weight updates entirely." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 536, + 290, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 536, + 290, + 715 + ], + "spans": [ + { + "bbox": [ + 54, + 536, + 290, + 715 + ], + "type": "text", + "content": "In the broader test-time learning literature, reflexive, compositional, and iterative refinement approaches like Reflexion (Shinn et al., 2023), Self-Refine (Madaan et al., 2023), (Self-)Critic (Gou et al., 2023), Chameleon (Lu et al., 2023), Meta-Prompting (Suzgun & Kalai, 2024), and Self-RAG (Asai et al., 2023) inter alia, use feedback loops or verification mechanisms to correct mistakes in solutions. TextGrad (Yuksekgonul et al., 2025) similarly draws on the notion of \"textual gradients\" as an alternative to parameter-based gradients and provides a pathway for improvement based on the content of mistakes. Our proposed DC framework differs by focusing explicitly on storing generalizable heuristics, solutions, or meta-level insights that can be repeatedly retrieved and applied across tasks, not just to correct a single solution. Furthermore, DC does not require a" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 68, + 541, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 68, + 541, + 102 + ], + "spans": [ + { + "bbox": [ + 307, + 68, + 541, + 102 + ], + "type": "text", + "content": "new training loop for each batch or scenario; instead, the memory itself is updated to reflect newly found solutions, errors, or strategies without touching the model weights." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 117, + 468, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 117, + 468, + 128 + ], + "spans": [ + { + "bbox": [ + 307, + 117, + 468, + 128 + ], + "type": "text", + "content": "A.2. 
Test-time compute and reasoning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 135, + 542, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 135, + 542, + 326 + ], + "spans": [ + { + "bbox": [ + 307, + 135, + 542, + 326 + ], + "type": "text", + "content": "It is now widely known and accepted that contemporary LLMs such as GPT-4 can exhibit substantial improvements in reasoning and generation capability when additional compute is devoted to inference-time strategies (e.g., chain-of-thought prompting (Wei et al., 2022; Kojima et al., 2022; Zhou et al., 2022), tree-of-thought expansions (Yao et al., 2023; Long, 2023), minimum Bayes risk decoding (Suzgun et al., 2023a; Shi et al., 2022; Golovneva et al., 2023), majority-vote sampling (Wang et al., 2023)). Prompting methods such as Tree-of-Thought (Yao et al., 2023), Graph-of-Thought (Besta et al., 2024), and other non-linear compositional reasoning paradigms systematically enlarge the inference-time search space. They allow models to explore various reasoning paths and exploit consensus or iterative corrections to arrive at more accurate and reliable conclusions (Wei et al., 2022; Wang et al., 2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 333, + 542, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 333, + 542, + 498 + ], + "spans": [ + { + "bbox": [ + 307, + 333, + 542, + 498 + ], + "type": "text", + "content": "However, these expansions come at the cost of increased computational overhead per test instance (Yao et al., 2023). They are, however, typically ephemeral: once a solution is generated, subsequent tasks or input samples do not generally benefit from the heavy compute spent earlier, unless the user manually engineers advanced prompt-sharing or in-context demonstration strategies. Cf. (Zelikman et al., 2022). 
Our work, on the other hand, aims to reduce repeated overhead across multiple test instances of a similar domain by building a memory that persists from one query to the next. This memory not only reduces repetitive mistakes, but also consolidates and codifies robust solution strategies—effectively amortizing or \"sharing\" the cost of initial reflection across future tasks.[14]" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 506, + 542, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 506, + 542, + 565 + ], + "spans": [ + { + "bbox": [ + 307, + 506, + 542, + 565 + ], + "type": "text", + "content": "Another related thread involves tool usage or code execution (Schick et al., 2023; Lu et al., 2023; Shen et al., 2023; Qin et al., 2023; Surís et al., 2023; Suzgun & Kalai, 2024). These studies have explored how LLMs can call external Python interpreters, symbolic solvers, or other specialized" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 307, + 573, + 542, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 573, + 542, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 573, + 542, + 713 + ], + "type": "text", + "content": "14Some lines of work—such as majority voting or sampling-based self-consistency—combine multiple inference passes for a single question but still lack a persistent knowledge base that spans different queries. DC differs in that we treat consecutive tasks in a sequence as a chance to refine a persistent, external store of learned lessons. 
The memory curation step selectively compiles relevant solutions, heuristics, expansions, or code blocks into a form that can be reused for upcoming queries. Thus, while the compute for the first few tasks may be higher, future tasks become simpler because the system can consult and adapt previously curated knowledge. This approach echoes the underlying motivation of test-time training—performing ongoing improvement at inference—but capitalizes on a cheap, external memory update in lieu of repeated or expensive parameter updates." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 293, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 293, + 731, + 302, + 740 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 291, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 291, + 175 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 291, + 175 + ], + "type": "text", + "content": "services and APIs to offload complex computations. Our empirical findings too illustrate that once an LLM under DC recognizes a systematic way (e.g., Python-based brute force algorithm) to handle a certain class of problems (like arithmetic puzzles), it can store that approach in memory and repeatedly retrieve it. Thus, DC not only invests extra compute in a single session but spreads that computational benefit across multiple interactions, effectively learning to use tools more consistently and reliably over time." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 188, + 274, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 188, + 274, + 201 + ], + "spans": [ + { + "bbox": [ + 52, + 188, + 274, + 201 + ], + "type": "text", + "content": "A.3. 
Memory-augmented generation and reasoning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 206, + 292, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 206, + 292, + 470 + ], + "spans": [ + { + "bbox": [ + 52, + 206, + 292, + 470 + ], + "type": "text", + "content": "Augmenting language models with external memory has seen renewed interest in recent years (Munkhdalai et al., 2019; Guu et al., 2020; Khandelwal et al., 2020; Bulatov et al., 2022; Borgeaud et al., 2022; Zhong et al., 2022; Feng et al., 2022; He et al., 2024; Wang et al., 2024a)—see also (Graves et al., 2014; Weston et al., 2014; Joulin & Mikolov, 2015; Suzgun et al., 2019) for early studies. Modern retrieval-augmented LLM approaches generally consult an external corpus of documents (i.e., a knowledge base) to improve factuality and reduce hallucination (Lewis et al., 2020; Lazaridou et al., 2023; Vu et al., 2023; Zhang et al., 2024b), but the retrieval corpus is almost always fixed prior to inference and does not evolve over time. These methods have been especially effective for open-domain question answering (Lewis et al., 2020; Guu et al., 2020; Karpukhin et al., 2020), where the model's own parameters may not hold all relevant knowledge. In practice, retrieval augmentation typically involves selecting and concatenating top-" + }, + { + "bbox": [ + 52, + 206, + 292, + 470 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 52, + 206, + 292, + 470 + ], + "type": "text", + "content": " passages from a knowledge-base—while useful for factual queries, the approach, however, does not inherently solve iterative improvement or learning from mistakes in the sense of building upon prior solutions at inference time." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 475, + 292, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 475, + 292, + 704 + ], + "spans": [ + { + "bbox": [ + 52, + 475, + 292, + 704 + ], + "type": "text", + "content": "Another line of research more closely aligns with our vision by storing not just reference knowledge but also the reasoning processes and solution strategies of language models. Several recent works have explored this direction. Thought-R retriever (Feng et al., 2024) logs the model's chain-of-thought from past queries and uses them for new, analogous queries. Buffer-of-Thoughts (BoT; Yang et al., 2025) takes a slightly different approach by distilling high-level \"thought templates\" from problem-solving processes, though it relies on predefined templates that seem to be tailored towards specific task types that were considered in their experiments. Madaan et al. (2022) have demonstrated that deployed models like GPT-3 can be improved through memory mechanisms that capture user feedback on errors, preventing similar mistakes in future interactions. Zhang et al. (2024a) have proposed a dual memory architecture combining long-term and short-term storage for medical applications, though their approach requires fine-tuning to incorporate new knowledge." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 67, + 544, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 544, + 210 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 544, + 210 + ], + "type": "text", + "content": "While these works reveal the many strategies for harnessing memory or feedback, DC emphasizes selectively storing the most relevant insights and heuristics. DC aims to avoid naive accumulation of full raw transcripts and ephemeral chain-of-thought expansions that can lead to memory bloat. 
Moreover, unlike methods that assume the model can be retrained or finetuned to incorporate memory items, we remain fully external and training-free; this aligns with \"plug-and-play\" usage principle, in which an off-the-shelf model is augmented by an external memory that it reads from and writes to, but does not require any gradient-based adaptation." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 66, + 224, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 66, + 224, + 79 + ], + "spans": [ + { + "bbox": [ + 52, + 66, + 224, + 79 + ], + "type": "text", + "content": "B. Additional Figures and Tables" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 87, + 338, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 87, + 338, + 99 + ], + "spans": [ + { + "bbox": [ + 52, + 87, + 338, + 99 + ], + "type": "text", + "content": "B.1. 
Performance Comparison of Baseline and DC-RS Approaches" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 165, + 111, + 430, + 320 + ], + "blocks": [ + { + "bbox": [ + 165, + 111, + 430, + 320 + ], + "lines": [ + { + "bbox": [ + 165, + 111, + 430, + 320 + ], + "spans": [ + { + "bbox": [ + 165, + 111, + 430, + 320 + ], + "type": "image", + "image_path": "3753abf54911b452cbec3a721e8f488ca140cb869fbf92935bdc43d355e6fecd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 331, + 542, + 352 + ], + "lines": [ + { + "bbox": [ + 52, + 331, + 542, + 352 + ], + "spans": [ + { + "bbox": [ + 52, + 331, + 542, + 352 + ], + "type": "text", + "content": "Figure 8: Overall performance of Claude 3.5 Sonnet under the baseline prompting approach with minimal instructions (Baseline) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 165, + 357, + 430, + 567 + ], + "blocks": [ + { + "bbox": [ + 165, + 357, + 430, + 567 + ], + "lines": [ + { + "bbox": [ + 165, + 357, + 430, + 567 + ], + "spans": [ + { + "bbox": [ + 165, + 357, + 430, + 567 + ], + "type": "image", + "image_path": "3bf7063caa0bbc6e0efa054f63a7152996963104001581e3d8f2f2908b61ebd9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 576, + 542, + 597 + ], + "lines": [ + { + "bbox": [ + 52, + 576, + 542, + 597 + ], + "spans": [ + { + "bbox": [ + 52, + 576, + 542, + 597 + ], + "type": "text", + "content": "Figure 9: Overall performance of GPT-40 under the baseline prompting approach with minimal instructions (Baseline) and Dynamic Cheatsheet with Retrieval & Synthesis (DC-RS)." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 425, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 425, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 425, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 234, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 234, + 79 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 234, + 79 + ], + "type": "text", + "content": "B.2. 
Clustering of Errors and Corrections" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 62, + 109, + 533, + 404 + ], + "blocks": [ + { + "bbox": [ + 165, + 97, + 460, + 109 + ], + "lines": [ + { + "bbox": [ + 165, + 97, + 460, + 109 + ], + "spans": [ + { + "bbox": [ + 165, + 97, + 460, + 109 + ], + "type": "text", + "content": "tSNE Visualization of the Question Embeddings in GPQA Diamond" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 62, + 109, + 533, + 404 + ], + "lines": [ + { + "bbox": [ + 62, + 109, + 533, + 404 + ], + "spans": [ + { + "bbox": [ + 62, + 109, + 533, + 404 + ], + "type": "image", + "image_path": "cbe8ce8c8a02f346ead1b1918f9cf394af0d8137154ff8de1bebe61ee04b97ea.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 420, + 543, + 452 + ], + "lines": [ + { + "bbox": [ + 51, + 420, + 543, + 452 + ], + "spans": [ + { + "bbox": [ + 51, + 420, + 543, + 452 + ], + "type": "text", + "content": "Figure 10: t-SNE visualization of the embeddings of the raw questions in GPQA-Diamond. Note that correct and incorrect answers often cluster in latent embedding space. DC can help transfer learned strategies within these clusters, but without careful curation, erroneous heuristics may also spread, thus requiring careful memory refinement and verification of solution strategies." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 321, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 321, + 79 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 321, + 79 + ], + "type": "text", + "content": "B.3. 
Evolution of Memory Content under Dynamic Cheatsheet" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 61, + 95, + 533, + 373 + ], + "blocks": [ + { + "bbox": [ + 61, + 95, + 533, + 373 + ], + "lines": [ + { + "bbox": [ + 61, + 95, + 533, + 373 + ], + "spans": [ + { + "bbox": [ + 61, + 95, + 533, + 373 + ], + "type": "image", + "image_path": "17cc3869468072f22e50c7c41c39565cf80065a240b2f2a75e0814101afcba71.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 388, + 544, + 420 + ], + "lines": [ + { + "bbox": [ + 52, + 388, + 544, + 420 + ], + "spans": [ + { + "bbox": [ + 52, + 388, + 544, + 420 + ], + "type": "text", + "content": "Figure 11: This figure illustrates how memory content of GPT-4o evolves over time in Game of 24, quantified using a longest-common-subsequence (LCS)-similarity metric (Suzgun et al., 2024) between consecutive states (measured at the word level). While both DC-Cu and DC-RS show high stability after the first few iterations, DC-Cu experiences slightly greater fluctuations in the second half of inference." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 291, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 291, + 79 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 291, + 79 + ], + "type": "text", + "content": "B.4. Solution Generator and Memory Curator Prompts" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 86, + 292, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 86, + 292, + 99 + ], + "spans": [ + { + "bbox": [ + 53, + 86, + 292, + 99 + ], + "type": "text", + "content": "B.4.1. 
Prompt Used by the Generator Model in Baseline" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 93, + 110, + 506, + 407 + ], + "blocks": [ + { + "bbox": [ + 93, + 110, + 506, + 407 + ], + "lines": [ + { + "bbox": [ + 93, + 110, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 93, + 110, + 506, + 407 + ], + "type": "image", + "image_path": "3b13b9e6e372b158774aa1a3b38e8955de1127886d5e63e67d9c3ef8f1a54762.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 416, + 543, + 449 + ], + "lines": [ + { + "bbox": [ + 52, + 416, + 543, + 449 + ], + "spans": [ + { + "bbox": [ + 52, + 416, + 543, + 449 + ], + "type": "text", + "content": "Figure 12: Prompt used in the baseline (BL) approach, where the model receives minimal instructions. The prompt simply asks the model to answer the given question without any structured guidance, additional reasoning steps, or tool-use encouragement. This setup represents a traditional one-off inference method, reflecting how LLMs typically operate by default." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 381, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 381, + 79 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 381, + 79 + ], + "type": "text", + "content": "B.4.2. 
Prompt Used by the Generator Model in DR, FH, and DC Approaches" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 159, + 97, + 256, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 97, + 256, + 105 + ], + "spans": [ + { + "bbox": [ + 159, + 97, + 256, + 105 + ], + "type": "text", + "content": "GENERATOR (PROBLEM SOLVER)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 159, + 107, + 433, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 107, + 433, + 115 + ], + "spans": [ + { + "bbox": [ + 159, + 107, + 433, + 115 + ], + "type": "text", + "content": "Instruction: You are an expert problem-solving assistant tasked with analyzing and solving various questions using" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 159, + 115, + 382, + 137 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 159, + 115, + 371, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 115, + 371, + 122 + ], + "spans": [ + { + "bbox": [ + 159, + 115, + 371, + 122 + ], + "type": "text", + "content": "a combination of your expertise and provided reference materials. Each task will include:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 160, + 122, + 258, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 122, + 258, + 129 + ], + "spans": [ + { + "bbox": [ + 160, + 122, + 258, + 129 + ], + "type": "text", + "content": "1. A specific question or problem to solve" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 159, + 129, + 382, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 129, + 382, + 137 + ], + "spans": [ + { + "bbox": [ + 159, + 129, + 382, + 137 + ], + "type": "text", + "content": "2. 
A cheatsheet containing relevant strategies, patterns, and examples from similar problems" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 160, + 148, + 232, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 148, + 232, + 156 + ], + "spans": [ + { + "bbox": [ + 160, + 148, + 232, + 156 + ], + "type": "text", + "content": "##1.ANALYSIS&STRATEGY" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 160, + 159, + 386, + 188 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 160, + 159, + 324, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 159, + 324, + 167 + ], + "spans": [ + { + "bbox": [ + 160, + 159, + 324, + 167 + ], + "type": "text", + "content": "- Carefully analyze both the question and cheatsheet before starting" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 160, + 167, + 386, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 167, + 386, + 173 + ], + "spans": [ + { + "bbox": [ + 160, + 167, + 386, + 173 + ], + "type": "text", + "content": "- Search for and identify any applicable patterns, strategies, or examples within the cheatsheet" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 160, + 173, + 308, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 173, + 308, + 180 + ], + "spans": [ + { + "bbox": [ + 160, + 173, + 308, + 180 + ], + "type": "text", + "content": "- Create a structured approach to solving the problem at hand" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 160, + 180, + 339, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 180, + 339, + 188 + ], + "spans": [ + { + "bbox": [ + 160, + 180, + 339, + 188 + ], + "type": "text", + "content": "- Review and document any limitations in the provided reference materials" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 160, + 
200, + 241, + 207 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 200, + 241, + 207 + ], + "spans": [ + { + "bbox": [ + 160, + 200, + 241, + 207 + ], + "type": "text", + "content": "## 2. SOLUTION DEVELOPMENT" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 160, + 209, + 353, + 239 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 160, + 209, + 353, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 209, + 353, + 217 + ], + "spans": [ + { + "bbox": [ + 160, + 209, + 353, + 217 + ], + "type": "text", + "content": "- Present your solution using clear, logical steps that others can follow and review" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 160, + 217, + 347, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 217, + 347, + 224 + ], + "spans": [ + { + "bbox": [ + 160, + 217, + 347, + 224 + ], + "type": "text", + "content": "- Explain your reasoning and methodology before presenting final conclusions" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 160, + 224, + 301, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 224, + 301, + 232 + ], + "spans": [ + { + "bbox": [ + 160, + 224, + 301, + 232 + ], + "type": "text", + "content": "- Provide detailed explanations for each step of the process" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 160, + 232, + 315, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 232, + 315, + 239 + ], + "spans": [ + { + "bbox": [ + 160, + 232, + 315, + 239 + ], + "type": "text", + "content": "- Check and verify all assumptions and intermediate calculations" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 160, + 251, + 233, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 251, + 233, + 258 + ], + "spans": [ + { + "bbox": [ + 160, + 251, + 233, + 258 + ], + "type": 
"text", + "content": "##3.PROGRAMMINGTASKS" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 160, + 260, + 221, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 260, + 221, + 267 + ], + "spans": [ + { + "bbox": [ + 160, + 260, + 221, + 267 + ], + "type": "text", + "content": "When coding is required:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 160, + 268, + 246, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 268, + 246, + 275 + ], + "spans": [ + { + "bbox": [ + 160, + 268, + 246, + 275 + ], + "type": "text", + "content": "- Write clean, efficient Python code" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 160, + 275, + 409, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 275, + 409, + 282 + ], + "spans": [ + { + "bbox": [ + 160, + 275, + 409, + 282 + ], + "type": "text", + "content": "- Follow the strict code formatting and execution protocol (always use the Python code formatting block;" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 160, + 282, + 409, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 282, + 409, + 289 + ], + "spans": [ + { + "bbox": [ + 160, + 282, + 409, + 289 + ], + "type": "text", + "content": "furthermore, after the code block, always explicitly request execution by appending: \"EXECUTE CODE!\":" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 160, + 290, + 188, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 290, + 188, + 297 + ], + "spans": [ + { + "bbox": [ + 160, + 290, + 188, + 297 + ], + "type": "text", + "content": "```\n``python" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 160, + 297, + 203, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 297, + 203, + 304 + ], + "spans": [ + { + "bbox": [ + 160, + 297, + 203, + 304 + ], + "type": "text", + "content": "Your code here" + } + ] + } + ], + "index": 
26 + }, + { + "bbox": [ + 160, + 312, + 205, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 312, + 205, + 319 + ], + "spans": [ + { + "bbox": [ + 160, + 312, + 205, + 319 + ], + "type": "text", + "content": "EXECUTE CODE!" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 160, + 323, + 419, + 404 + ], + "type": "list", + "angle": 0, + "index": 39, + "blocks": [ + { + "bbox": [ + 160, + 323, + 375, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 323, + 375, + 330 + ], + "spans": [ + { + "bbox": [ + 160, + 323, + 375, + 330 + ], + "type": "text", + "content": "- All required imports and dependencies should be clearly declared at the top of your code" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 160, + 331, + 339, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 331, + 339, + 338 + ], + "spans": [ + { + "bbox": [ + 160, + 331, + 339, + 338 + ], + "type": "text", + "content": "- Include clear inline comments to explain any complex programming logic" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 160, + 338, + 286, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 338, + 286, + 345 + ], + "spans": [ + { + "bbox": [ + 160, + 338, + 286, + 345 + ], + "type": "text", + "content": "- Perform result validation after executing your code" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 160, + 345, + 328, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 345, + 328, + 352 + ], + "spans": [ + { + "bbox": [ + 160, + 345, + 328, + 352 + ], + "type": "text", + "content": "- Apply optimization techniques from the cheatsheet when applicable" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 160, + 352, + 419, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 352, + 419, + 360 + ], + "spans": [ + { + "bbox": [ + 160, + 352, + 419, + 360 + ], + "type": "text", + 
"content": "- The code should be completely self-contained without external file dependencies—it should be ready to be" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 160, + 360, + 209, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 360, + 209, + 367 + ], + "spans": [ + { + "bbox": [ + 160, + 360, + 209, + 367 + ], + "type": "text", + "content": "executed right away" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 160, + 367, + 358, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 367, + 358, + 375 + ], + "spans": [ + { + "bbox": [ + 160, + 367, + 358, + 375 + ], + "type": "text", + "content": "- Do not include any placeholders, system-specific paths, or hard-coded local paths" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 160, + 375, + 295, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 375, + 295, + 382 + ], + "spans": [ + { + "bbox": [ + 160, + 375, + 295, + 382 + ], + "type": "text", + "content": "- Feel free to use standard and widely-used pip packages" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 160, + 382, + 309, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 382, + 309, + 389 + ], + "spans": [ + { + "bbox": [ + 160, + 382, + 309, + 389 + ], + "type": "text", + "content": "- Opt for alternative methods if errors persist during execution" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 160, + 389, + 350, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 389, + 350, + 397 + ], + "spans": [ + { + "bbox": [ + 160, + 389, + 350, + 397 + ], + "type": "text", + "content": "- Exclude local paths and engine-specific settings (e.g., avoid configurations like" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 160, + 397, + 290, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 397, + 290, + 404 + ], + "spans": [ + { + "bbox": [ + 160, + 
397, + 290, + 404 + ], + "type": "text", + "content": "chess.engineSimpleEngine.popen_uci(\"/usr/bin/stockfish\")" + } + ] + } + ], + "index": 38 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 160, + 416, + 237, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 416, + 237, + 422 + ], + "spans": [ + { + "bbox": [ + 160, + 416, + 237, + 422 + ], + "type": "text", + "content": "## 4. FINAL ANSWER FORMAT" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 160, + 426, + 301, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 426, + 301, + 433 + ], + "spans": [ + { + "bbox": [ + 160, + 426, + 301, + 433 + ], + "type": "text", + "content": "ALWAYS present your final answer in the following format:" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 160, + 434, + 203, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 434, + 203, + 441 + ], + "spans": [ + { + "bbox": [ + 160, + 434, + 203, + 441 + ], + "type": "text", + "content": "FINAL ANSWER:" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 160, + 443, + 185, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 443, + 185, + 449 + ], + "spans": [ + { + "bbox": [ + 160, + 443, + 185, + 449 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 160, + 450, + 196, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 450, + 196, + 456 + ], + "spans": [ + { + "bbox": [ + 160, + 450, + 196, + 456 + ], + "type": "text", + "content": "(final answer)" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 160, + 457, + 187, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 457, + 187, + 464 + ], + "spans": [ + { + "bbox": [ + 160, + 457, + 187, + 464 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 160, + 465, + 361, + 473 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 160, + 465, + 361, + 473 + ], + "spans": [ + { + "bbox": [ + 160, + 465, + 361, + 473 + ], + "type": "text", + "content": "N.B. Make sure that the final answer is properly wrapped inside the block." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 160, + 475, + 391, + 498 + ], + "type": "list", + "angle": 0, + "index": 50, + "blocks": [ + { + "bbox": [ + 160, + 475, + 331, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 475, + 331, + 483 + ], + "spans": [ + { + "bbox": [ + 160, + 475, + 331, + 483 + ], + "type": "text", + "content": "* For multiple-choice questions: Only provide the letter choice (e.g., (A))" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 160, + 483, + 315, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 483, + 315, + 491 + ], + "spans": [ + { + "bbox": [ + 160, + 483, + 315, + 491 + ], + "type": "text", + "content": "* For numerical answers: Only provide the final number (e.g., 42)" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 160, + 491, + 391, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 491, + 391, + 498 + ], + "spans": [ + { + "bbox": [ + 160, + 491, + 391, + 498 + ], + "type": "text", + "content": "* For other types of answers, including free-response answers: Provide the complete final answer" + } + ] + } + ], + "index": 49 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 160, + 502, + 185, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 502, + 185, + 509 + ], + "spans": [ + { + "bbox": [ + 160, + 502, + 185, + 509 + ], + "type": "text", + "content": "Example:" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 160, + 509, + 235, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 509, + 235, + 516 + ], + "spans": [ + { + "bbox": [ + 160, + 509, + 235, + 516 + ], + "type": "text", + "content": "Q: What is the 
meaning of life?" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 160, + 516, + 176, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 516, + 176, + 522 + ], + "spans": [ + { + "bbox": [ + 160, + 516, + 176, + 522 + ], + "type": "text", + "content": "A: [..]" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 160, + 523, + 203, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 523, + 203, + 529 + ], + "spans": [ + { + "bbox": [ + 160, + 523, + 203, + 529 + ], + "type": "text", + "content": "FINAL ANSWER:" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 160, + 532, + 185, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 532, + 185, + 537 + ], + "spans": [ + { + "bbox": [ + 160, + 532, + 185, + 537 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 160, + 539, + 168, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 539, + 168, + 544 + ], + "spans": [ + { + "bbox": [ + 160, + 539, + 168, + 544 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 160, + 545, + 187, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 545, + 187, + 552 + ], + "spans": [ + { + "bbox": [ + 160, + 545, + 187, + 552 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 160, + 564, + 197, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 564, + 197, + 571 + ], + "spans": [ + { + "bbox": [ + 160, + 564, + 197, + 571 + ], + "type": "text", + "content": "CHEATSHEET:" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 160, + 578, + 203, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 578, + 203, + 586 + ], + "spans": [ + { + "bbox": [ + 160, + 578, + 203, + 586 + ], + "type": "text", + "content": "[CHEATSHEET]" + } + ] + } + ], + "index": 
59 + }, + { + "bbox": [ + 160, + 587, + 185, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 587, + 185, + 593 + ], + "spans": [ + { + "bbox": [ + 160, + 587, + 185, + 593 + ], + "type": "text", + "content": "\"" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 160, + 610, + 270, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 610, + 270, + 618 + ], + "spans": [ + { + "bbox": [ + 160, + 610, + 270, + 618 + ], + "type": "text", + "content": "Now it is time to solve the following question." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 160, + 620, + 206, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 620, + 206, + 627 + ], + "spans": [ + { + "bbox": [ + 160, + 620, + 206, + 627 + ], + "type": "text", + "content": "CURRENTINPUT:" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 160, + 635, + 197, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 635, + 197, + 643 + ], + "spans": [ + { + "bbox": [ + 160, + 635, + 197, + 643 + ], + "type": "text", + "content": "[QUESTION]" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 52, + 660, + 543, + 721 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 660, + 543, + 721 + ], + "spans": [ + { + "bbox": [ + 52, + 660, + 543, + 721 + ], + "type": "text", + "content": "Figure 13: Generator prompt used in the DR, FH, and DC approaches, where the model receives structured high-level instructions on solution development, strategy selection, and tool usage. This prompt explicitly encourages Python code generation and execution for computational tasks. Notably, this same structured prompt is used in all non-BL methods, including DC-Ø, DR, FH, DC-Cu, and DC-RS. We also remark that during the initial phases of our experiments, we used \"cheatsheet\" and \"memory\" interchangeably to describe stored problem-solving content. 
However, to maintain consistency, we formally define " + }, + { + "bbox": [ + 52, + 660, + 543, + 721 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 52, + 660, + 543, + 721 + ], + "type": "text", + "content": " as memory throughout this paper. Since this was purely a semantic choice, we did not find it necessary to rerun our experiments to reflect this terminology shift." + } + ] + } + ], + "index": 64, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 65 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 338, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 338, + 79 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 338, + 79 + ], + "type": "text", + "content": "B.4.3. 
Prompt Used by the Memory Curation Model under DC-RS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 145, + 97, + 212, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 97, + 212, + 104 + ], + "spans": [ + { + "bbox": [ + 145, + 97, + 212, + 104 + ], + "type": "text", + "content": "CHEATSHEET CURATOR" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 145, + 106, + 200, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 106, + 200, + 114 + ], + "spans": [ + { + "bbox": [ + 145, + 106, + 200, + 114 + ], + "type": "text", + "content": "Purpose and Goals" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 145, + 114, + 444, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 114, + 444, + 134 + ], + "spans": [ + { + "bbox": [ + 145, + 114, + 444, + 134 + ], + "type": "text", + "content": "You are responsible for maintaining, refining, and optimizing the Dynamic Cheatsheet, which serves as a compact yet evolving repository of problem-solving strategies, reusable code snippets, and meta-reasoning techniques. Your goal is to enhance the model's long-term performance by continuously updating the cheatsheet with high-value insights while filtering out redundant or trivial information." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 145, + 135, + 421, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 135, + 421, + 142 + ], + "spans": [ + { + "bbox": [ + 145, + 135, + 421, + 142 + ], + "type": "text", + "content": "- The cheatsheet should include quick, accurate, reliable, and practical solutions to a range of technical and creative challenges." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 145, + 142, + 444, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 142, + 444, + 156 + ], + "spans": [ + { + "bbox": [ + 145, + 142, + 444, + 156 + ], + "type": "text", + "content": "- After seeing each input, you should improve the content of the cheatsheet, synthesizing lessons, insights, tricks, and errors learned from past problems and adapting to new challenges." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 145, + 164, + 205, + 171 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 164, + 205, + 171 + ], + "spans": [ + { + "bbox": [ + 145, + 164, + 205, + 171 + ], + "type": "text", + "content": "Core Responsibilities" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 145, + 173, + 219, + 180 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 173, + 219, + 180 + ], + "spans": [ + { + "bbox": [ + 145, + 173, + 219, + 180 + ], + "type": "text", + "content": "Selective Knowledge Retention:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 145, + 180, + 427, + 201 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 145, + 180, + 427, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 180, + 427, + 187 + ], + "spans": [ + { + "bbox": [ + 145, + 180, + 427, + 187 + ], + "type": "text", + "content": "- Preserve only high-value strategies, code blocks, insights, and reusable patterns that significantly contribute to problem-solving." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 145, + 187, + 341, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 187, + 341, + 194 + ], + "spans": [ + { + "bbox": [ + 145, + 187, + 341, + 194 + ], + "type": "text", + "content": "- Discard redundant, trivial, or highly problem-specific details that do not generalize well." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 145, + 194, + 374, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 194, + 374, + 201 + ], + "spans": [ + { + "bbox": [ + 145, + 194, + 374, + 201 + ], + "type": "text", + "content": "- Ensure that previously effective solutions remain accessible while incorporating new, superior methods." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 145, + 202, + 238, + 209 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 202, + 238, + 209 + ], + "spans": [ + { + "bbox": [ + 145, + 202, + 238, + 209 + ], + "type": "text", + "content": "Continuous Refinement & Optimization:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 145, + 209, + 359, + 229 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 145, + 209, + 359, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 209, + 359, + 217 + ], + "spans": [ + { + "bbox": [ + 145, + 209, + 359, + 217 + ], + "type": "text", + "content": "- Improve existing strategies by incorporating more efficient, elegant, or generalizable techniques." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 145, + 217, + 325, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 217, + 325, + 223 + ], + "spans": [ + { + "bbox": [ + 145, + 217, + 325, + 223 + ], + "type": "text", + "content": "- Remove duplicate entries or rephrase unclear explanations for better readability." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 145, + 223, + 318, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 223, + 318, + 229 + ], + "spans": [ + { + "bbox": [ + 145, + 223, + 318, + 229 + ], + "type": "text", + "content": "- Introduce new meta-strategies based on recent problem-solving experiences." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 145, + 232, + 206, + 238 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 232, + 206, + 238 + ], + "spans": [ + { + "bbox": [ + 145, + 232, + 206, + 238 + ], + "type": "text", + "content": "Structure & Organization:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 145, + 238, + 296, + 271 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 145, + 238, + 296, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 238, + 296, + 244 + ], + "spans": [ + { + "bbox": [ + 145, + 238, + 296, + 244 + ], + "type": "text", + "content": "- Maintain a well-organized cheatsheet with clearly defined sections:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 145, + 244, + 257, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 244, + 257, + 251 + ], + "spans": [ + { + "bbox": [ + 145, + 244, + 257, + 251 + ], + "type": "text", + "content": "- Reusable Code Snippets and Solution Strategies" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 145, + 251, + 231, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 251, + 231, + 258 + ], + "spans": [ + { + "bbox": [ + 145, + 251, + 231, + 258 + ], + "type": "text", + "content": "- General Problem-Solving Heuristics" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 145, + 258, + 237, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 258, + 237, + 264 + ], + "spans": [ + { + "bbox": [ + 145, + 258, + 237, + 264 + ], + "type": "text", + "content": "- Optimization Techniques & Edge Cases" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 145, + 264, + 231, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 264, + 231, + 271 + ], + "spans": [ + { + "bbox": [ + 145, + 264, + 231, + 271 + ], + "type": "text", + "content": "-Specialized 
Knowledge & Theorems" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 145, + 271, + 357, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 271, + 357, + 278 + ], + "spans": [ + { + "bbox": [ + 145, + 271, + 357, + 278 + ], + "type": "text", + "content": "- Use tagging (e.g., Q14, Q22) to reference previous problems that contributed to a given strategy." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 145, + 289, + 222, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 289, + 222, + 296 + ], + "spans": [ + { + "bbox": [ + 145, + 289, + 222, + 296 + ], + "type": "text", + "content": "Principles and Best Practices" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 145, + 297, + 339, + 331 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 145, + 297, + 227, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 297, + 227, + 304 + ], + "spans": [ + { + "bbox": [ + 145, + 297, + 227, + 304 + ], + "type": "text", + "content": "For every new problem encountered:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 145, + 304, + 234, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 304, + 234, + 312 + ], + "spans": [ + { + "bbox": [ + 145, + 304, + 234, + 312 + ], + "type": "text", + "content": "1. Evaluate the Solution's Effectiveness" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 145, + 312, + 227, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 312, + 227, + 318 + ], + "spans": [ + { + "bbox": [ + 145, + 312, + 227, + 318 + ], + "type": "text", + "content": "- Was the applied strategy optimal?" 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 145, + 318, + 304, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 318, + 304, + 325 + ], + "spans": [ + { + "bbox": [ + 145, + 318, + 304, + 325 + ], + "type": "text", + "content": "- Could the solution be improved, generalized, or made more efficient?" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 145, + 325, + 339, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 325, + 339, + 331 + ], + "spans": [ + { + "bbox": [ + 145, + 325, + 339, + 331 + ], + "type": "text", + "content": "- Does the cheatsheet already contain a similar strategy, or should a new one be added?" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 145, + 333, + 258, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 333, + 258, + 340 + ], + "spans": [ + { + "bbox": [ + 145, + 333, + 258, + 340 + ], + "type": "text", + "content": "2. Curate & Document the Most Valuable Insights" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 145, + 340, + 400, + 360 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 145, + 340, + 400, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 340, + 400, + 347 + ], + "spans": [ + { + "bbox": [ + 145, + 340, + 400, + 347 + ], + "type": "text", + "content": "- Extract key algorithms, heuristics, and reusable code snippets that would help solve similar problems in the future." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 145, + 347, + 318, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 347, + 318, + 354 + ], + "spans": [ + { + "bbox": [ + 145, + 347, + 318, + 354 + ], + "type": "text", + "content": "- Identify patterns, edge cases, and problem-specific insights worth retaining." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 145, + 354, + 334, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 354, + 334, + 360 + ], + "spans": [ + { + "bbox": [ + 145, + 354, + 334, + 360 + ], + "type": "text", + "content": "- If a better approach than a previously recorded one is found, replace the old version." + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 145, + 362, + 235, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 362, + 235, + 369 + ], + "spans": [ + { + "bbox": [ + 145, + 362, + 235, + 369 + ], + "type": "text", + "content": "3. Maintain Concise, Actionable Entries" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 145, + 369, + 331, + 388 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 145, + 369, + 286, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 369, + 286, + 375 + ], + "spans": [ + { + "bbox": [ + 145, + 369, + 286, + 375 + ], + "type": "text", + "content": "- Keep explanations clear, actionable, concise, and to the point." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 145, + 375, + 290, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 375, + 290, + 381 + ], + "spans": [ + { + "bbox": [ + 145, + 375, + 290, + 381 + ], + "type": "text", + "content": "- Include only the most effective and widely applicable methods." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 145, + 381, + 331, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 381, + 331, + 388 + ], + "spans": [ + { + "bbox": [ + 145, + 381, + 331, + 388 + ], + "type": "text", + "content": "- Seek to extract useful and general solution strategies and/or Python code snippets." 
+ } + ] + } + ], + "index": 41 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 145, + 391, + 216, + 397 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 391, + 216, + 397 + ], + "spans": [ + { + "bbox": [ + 145, + 391, + 216, + 397 + ], + "type": "text", + "content": "4. Implement a Usage Counter" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 145, + 398, + 410, + 411 + ], + "type": "list", + "angle": 0, + "index": 46, + "blocks": [ + { + "bbox": [ + 145, + 398, + 410, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 398, + 410, + 404 + ], + "spans": [ + { + "bbox": [ + 145, + 398, + 410, + 404 + ], + "type": "text", + "content": "Each entry must include a usage count: Increase the count every time a strategy is successfully used in problem-solving." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 145, + 405, + 320, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 405, + 320, + 411 + ], + "spans": [ + { + "bbox": [ + 145, + 405, + 320, + 411 + ], + "type": "text", + "content": "- Use the count to prioritize frequently used solutions over rarely applied ones." 
+ } + ] + } + ], + "index": 45 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 145, + 422, + 212, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 422, + 212, + 428 + ], + "spans": [ + { + "bbox": [ + 145, + 422, + 212, + 428 + ], + "type": "text", + "content": "Memory Update Format" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 145, + 429, + 257, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 429, + 257, + 435 + ], + "spans": [ + { + "bbox": [ + 145, + 429, + 257, + 435 + ], + "type": "text", + "content": "Use the following structure for each memory item:" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 145, + 436, + 154, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 436, + 154, + 441 + ], + "spans": [ + { + "bbox": [ + 145, + 436, + 154, + 441 + ], + "type": "text", + "content": "··" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 145, + 445, + 185, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 445, + 185, + 451 + ], + "spans": [ + { + "bbox": [ + 145, + 445, + 185, + 451 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 145, + 452, + 179, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 452, + 179, + 457 + ], + "spans": [ + { + "bbox": [ + 145, + 452, + 179, + 457 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 145, + 457, + 380, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 457, + 380, + 464 + ], + "spans": [ + { + "bbox": [ + 145, + 457, + 380, + 464 + ], + "type": "text", + "content": "[Briefly describe the problem context, purpose, and key aspects of the solution.] 
(Reference: Q1, Q2, Q6, etc.)" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 145, + 464, + 180, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 464, + 180, + 471 + ], + "spans": [ + { + "bbox": [ + 145, + 464, + 180, + 471 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 145, + 472, + 173, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 472, + 173, + 478 + ], + "spans": [ + { + "bbox": [ + 145, + 472, + 173, + 478 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 145, + 478, + 331, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 478, + 331, + 485 + ], + "spans": [ + { + "bbox": [ + 145, + 478, + 331, + 485 + ], + "type": "text", + "content": "[Provide a well-documented code snippet, worked-out solution, or efficient strategy.]" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 145, + 485, + 174, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 485, + 174, + 491 + ], + "spans": [ + { + "bbox": [ + 145, + 485, + 174, + 491 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 145, + 491, + 187, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 491, + 187, + 498 + ], + "spans": [ + { + "bbox": [ + 145, + 491, + 187, + 498 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 145, + 498, + 311, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 498, + 311, + 505 + ], + "spans": [ + { + "bbox": [ + 145, + 498, + 311, + 505 + ], + "type": "text", + "content": "** Count: [Number of times this strategy has been used to solve a problem.]" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 145, + 506, + 185, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 506, + 185, + 513 + ], 
+ "spans": [ + { + "bbox": [ + 145, + 506, + 185, + 513 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 145, + 514, + 154, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 514, + 154, + 519 + ], + "spans": [ + { + "bbox": [ + 145, + 514, + 154, + 519 + ], + "type": "text", + "content": "[...]" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 145, + 520, + 187, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 520, + 187, + 526 + ], + "spans": [ + { + "bbox": [ + 145, + 520, + 187, + 526 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 145, + 526, + 176, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 526, + 176, + 533 + ], + "spans": [ + { + "bbox": [ + 145, + 526, + 176, + 533 + ], + "type": "text", + "content": "** Count: [...]" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 145, + 535, + 155, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 535, + 155, + 542 + ], + "spans": [ + { + "bbox": [ + 145, + 535, + 155, + 542 + ], + "type": "text", + "content": "[...]" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 145, + 544, + 185, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 544, + 185, + 551 + ], + "spans": [ + { + "bbox": [ + 145, + 544, + 185, + 551 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 145, + 552, + 154, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 552, + 154, + 557 + ], + "spans": [ + { + "bbox": [ + 145, + 552, + 154, + 557 + ], + "type": "text", + "content": "[...]" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 145, + 558, + 187, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 558, + 187, + 564 + ], + "spans": [ + { + "bbox": [ + 145, + 558, + 187, + 564 + ], + 
"type": "text", + "content": "" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 145, + 573, + 448, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 573, + 448, + 586 + ], + "spans": [ + { + "bbox": [ + 145, + 573, + 448, + 586 + ], + "type": "text", + "content": "- Prioritize accuracy, efficiency & generalizability: The cheatsheet should capture insights that apply across multiple problems rather than just storing isolated solutions." + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 145, + 586, + 407, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 586, + 407, + 594 + ], + "spans": [ + { + "bbox": [ + 145, + 586, + 407, + 594 + ], + "type": "text", + "content": "- Ensure clarity & usability: Every update should make the cheatsheet more structured, actionable, and easy to navigate." + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 145, + 594, + 372, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 594, + 372, + 600 + ], + "spans": [ + { + "bbox": [ + 145, + 594, + 372, + 600 + ], + "type": "text", + "content": "- Maintain a balance: While adding new strategies, ensure that old but effective techniques are not lost." + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 145, + 600, + 444, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 600, + 444, + 613 + ], + "spans": [ + { + "bbox": [ + 145, + 600, + 444, + 613 + ], + "type": "text", + "content": "- Keep it evolving: The cheatsheet should be a living document that continuously improves over time, enhancing test-time meta-learning capabilities." + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 145, + 615, + 436, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 615, + 436, + 622 + ], + "spans": [ + { + "bbox": [ + 145, + 615, + 436, + 622 + ], + "type": "text", + "content": "N.B. 
Keep in mind that once the cheatsheet is updated, any previous content not directly included will be lost and cannot be retrieved." + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 145, + 622, + 446, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 622, + 446, + 628 + ], + "spans": [ + { + "bbox": [ + 145, + 622, + 446, + 628 + ], + "type": "text", + "content": "Therefore, make sure to explicitly copy any (or all) relevant information from the previous cheatsheet to the new cheatsheet! Furthermore," + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 145, + 629, + 361, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 629, + 361, + 635 + ], + "spans": [ + { + "bbox": [ + 145, + 629, + 361, + 635 + ], + "type": "text", + "content": "make sure that all information related to the cheatsheet is wrapped inside the block." + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 52, + 647, + 544, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 647, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 647, + 544, + 718 + ], + "type": "text", + "content": "Figure 14: Prompt used for the memory curator under DC-RS, which is responsible for maintaining an evolving repository of problem-solving strategies, code snippets, and heuristics. The curator selectively retains high-value insights, refines existing strategies, and organizes memory efficiently. This ensures the memory (cheatsheet) remains concise, generalizable, and action-oriented, continuously improving test-time reasoning. (Once again, we note that during the initial phases of our experiments, we used \"cheatsheet\" and \"memory\" interchangeably to describe stored problem-solving content. 
However, to maintain consistency, we formally define " + }, + { + "bbox": [ + 52, + 647, + 544, + 718 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 52, + 647, + 544, + 718 + ], + "type": "text", + "content": " as memory throughout this paper. Since this was purely a semantic choice, we did not find it necessary to rerun our experiments to reflect this terminology shift.)" + } + ] + } + ], + "index": 74 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 302, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 302, + 741 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 302, + 741 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 75 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 143, + 68, + 457, + 355 + ], + "blocks": [ + { + "bbox": [ + 143, + 68, + 457, + 355 + ], + "lines": [ + { + "bbox": [ + 143, + 68, + 457, + 355 + ], + "spans": [ + { + "bbox": [ + 143, + 68, + 457, + 355 + ], + "type": "image", + "image_path": "c18c5a4c320e8bf795d22cf17be40b64fa6fba5af8154a28995e9bc5885a3668.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 362, + 459, + 374 + ], + "lines": [ + { + "bbox": [ + 135, + 362, + 459, + 374 + ], + "spans": [ + { + "bbox": [ + 135, + 362, + 459, + 374 + ], + "type": "text", + "content": "Figure 15: The rest of the prompt used by the memory curator under DC-RS (Figure 14)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "spans": [ + { + "bbox": [ + 170, + 45, + 424, + 56 + ], + "type": "text", + "content": "Dynamic Cheatsheet: Test-Time Learning with Adaptive Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_content_list.json b/data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1931a648e8d4f1989c6e5461e85bc215a52501a7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_content_list.json @@ -0,0 +1,2076 @@ +[ + { + "type": "text", + "text": "Perception-R1: Pioneering Perception Policy with Reinforcement Learning", + "text_level": 1, + "bbox": [ + 199, + 122, + 799, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "En Yu $^{1,\\mathbb{I}}$ , Kangheng Lin $^{2,\\mathbb{I}}$ , Liang Zhao $^{3,\\mathbb{I}}$ , Jisheng Yin $^{3}$ , Yana Wei $^{4}$ , Yuang Peng $^{5}$ , Haoran Wei $^{3}$ , Jianjian Sun $^{3}$ , Chunrui Han $^{3}$ , Zheng Ge $^{3}$ , Xiangyu Zhang $^{3}$ , Daxin Jiang $^{3}$ , Jingyu Wang $^{2}$ , Wenbing Tao $^{1\\dagger}$ $^{1}$ Huazhong University of Science and Technology \n $^{2}$ Beijing University of Posts and Telecommunications \n $^{3}$ StepFun \n $^{4}$ 
Johns Hopkins University \n ${}^{5}$ Tingshua University \n{yuen, wenbingtao}@hust.edu.cn \nhttps://github.com/linkangheng/PR1", + "bbox": [ + 184, + 223, + 816, + 340 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 376, + 537, + 391 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Inspired by the success of DeepSeek-R1, we explore the potential of rule-based reinforcement learning (RL) in MLLM post-training for perception policy learning. While promising, our initial experiments reveal that incorporating a thinking process through RL does not consistently lead to performance gains across all visual perception tasks. This leads us to delve into the essential role of RL in the context of visual perception. In this work, we return to the fundamentals and explore the effects of RL on different perception tasks. We observe that the perceptual perplexity is a major factor in determining the effectiveness of RL. We also observe that reward design plays a crucial role in further approaching the upper limit of model perception. To leverage these findings, we propose Perceptron-R1, a scalable RL framework using GRPO during MLLM post-training. With a standard Qwen2-VL-2B-Instruct, Perception-R1 achieves $+4.2\\%$ on RefCOCO+, $+17.9\\%$ on PixMo-Count, $+4.2\\%$ on PageOCR, and notably, $31.9\\%$ AP on COCO2017 val1 for the first time, establishing a strong baseline for perception policy learning.", + "bbox": [ + 228, + 407, + 767, + 602 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 628, + 310, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "\"We do not see the world as it is, but as we are — or as we are conditioned to see it.\"", + "bbox": [ + 184, + 662, + 686, + 676 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Stephen R. 
Covey", + "bbox": [ + 700, + 685, + 807, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The landscape of large language model (LLM) has undergone a paradigm shift from non-reasoning foundation model, e.g., GPT-4/4o [44, 19], DeepSeek-V3 [33], to strongly reasoning model, e.g., OpenAI o1/o3 [45], DeepSeek-R1 [12], and Kimi-1.5 [57]. DeepSeek-R1, in particular, introduced a simple yet effective rule-based reinforcement learning (RL) approach [55], enabling emergent reasoning patterns without relying on traditional scaffolding techniques such as Monte Carlo Tree Search (MCTS) [17, 67] or Process Reward Models (PRM) [31]. This has catalyzed a new revolution in LLM post-training techniques, prompting researchers to develop more powerful reasoning language models [42, 24].", + "bbox": [ + 169, + 714, + 823, + 825 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite these advancements, current explorations predominantly focus on the purely linguistic domain, and the unimodal nature of these reasoning models limits their ability to engage with the world in a truly perceptive way. To bridge this gap, this work takes a pioneering step in exploring", + "bbox": [ + 169, + 832, + 823, + 875 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07954v1 [cs.CV] 10 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Corresponding author, † Core contribution", + "bbox": [ + 191, + 883, + 450, + 898 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 922, + 313, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the potential of perception policy learning within multimodal LLMs [61, 3] from lens of RL. 
While transferring RL techniques with reasoning processes, i.e., chain-of-thought [66], from the language domain shows promise on certain visual tasks, our empirical studies reveal that this approach is not universally effective. This inevitably prompts us to reexamine the role that RL play in visual perception tasks, and how the utilization of RL can lead to better and scalable perception policy.", + "bbox": [ + 169, + 90, + 823, + 161 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The current understanding of RL as a post-training technique is primarily grounded in purely linguistic tasks [24] and language-centric multimodal tasks [10]. However, the characteristics of visual perception tasks are fundamentally distinct from those of natural language, necessitating a revised understanding of RL in the context of visual perception. Specifically, visual perception possesses two unique properties, as follows:", + "bbox": [ + 169, + 167, + 823, + 237 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Visual perception is embodied in the objective physical world. It possesses definite physical truth values, e.g., points, lines, or bounding boxes, but it lacks semantics compared to language.", + "- Visual perception, e.g., visual grounding and counting, are mostly \"single-step\" direct predictions. It lacks structured reasoning search space for RL exploration." + ], + "bbox": [ + 168, + 242, + 823, + 304 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "These two characteristics determine that the application of RL to visual perception will have different properties from pure language [24] and language-centric multimodal [39, 41] approaches. In this work, we delve into the RL post-training of MLLM in the domain of visual perception, and further complements and extends the above understanding. 
Through extensive experimental analysis, we have uncovered several bitter yet valuable findings.", + "bbox": [ + 169, + 311, + 823, + 380 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Explicit thinking process (CoT) during RL is not necessary for current perception policy. (§ 5.2) We observe that the model without thinking process performs better than the one with thinking process.", + "- Reward design plays a pivotal role in perception policy learning. (§ 5.3) An appropriate reward function will lead to a healthier learning curve and explore stronger perceptual patterns of MLLM.", + "- Perceptual perplexity determines RL superiority over SFT. (§ 5.2) We observe that RL can bring more significant improvement compared to SFT on more complex visual tasks, e.g., object detection." + ], + "bbox": [ + 168, + 386, + 823, + 479 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Driven by these findings, we present a simple, effective, and scalable RL framework, i.e., Perception-R1, for efficient perception policy learning. Inspired by mainstream language reasoning models [12, 57], Perception-R1 applies rule-based RL algorithm GRPO [55] during MLLM post-training stage. With a vanilla Qwen2-VL-2B-Instruct [61], Perception-R1 achieves significant improvement on multiple visual perception benchmarks, e.g., $+4.2\\%$ on RefCOCO+ [40], $+17.9\\%$ on PixMoCount [13], and $+4.2\\%$ F1-score on PageOCR [34]. More importantly, Perception-R1 serves as the first time to enable a pure MLLM to reach $31.9\\%$ mAP on the object detection benchmark COCO2017 [32] va1, showcasing the great potential of general foundation models to surpass expert models in mainstream visual tasks. 
We hope our method, results, and analysis will inspire future research on perception policy learning with RL.", + "bbox": [ + 169, + 493, + 826, + 632 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Works", + "text_level": 1, + "bbox": [ + 171, + 651, + 330, + 667 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Multimodal Foundation and Reasoning Models. Recently, vision-language models [37, 3, 73, 70] have demonstrated remarkable capabilities in visual comprehension [64, 68] and generation [14, 48] through large-scale pretraining [2, 61] and visual instruction tuning [37, 35]. These models integrate visual modalities into a unified semantic space via visual encoders [49] and adapters [11, 37], while leveraging auto-regressive large language models [59, 1] as decoders for output generation. Despite the advancements in multimodal foundation models, their visual reasoning capabilities remain in an early developmental stage. Recent approaches [8, 39, 41] have explored reinforcement learning (RL) post-training to enhance visual reasoning. However, they primarily focus on language-centric tasks such as ambiguous reference resolution [39] and geometric problem-solving [41], while overlooking critical aspects of perception-driven reasoning. In this work, we take a pioneering step in utilizing RL for perception policy learning, aiming to bridge this gap and advance multimodal reasoning.", + "bbox": [ + 169, + 681, + 826, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Visual Perception in Multimodal Models. Visual Perception, as a concept in the field of computer vision [21, 52, 20, 69, 29], refers to the process of interpreting and understanding sensory, i.e., vision, information from the real-word. In the context of multimodal LLMs (MLLM), visual perception plays a crucial role in enabling the models to integrate, comprehend and reason visual information from the image or video. 
Existing MLLM generally enhance their visual perception capabilities by", + "bbox": [ + 169, + 840, + 826, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "designing more advanced visual perception architectures [63, 64], more suitable visual-language modeling strategies [70, 68], and more sophisticated post-training techniques [74]. This work aims to explore the potential of further enhancing visual perception from the perspective of RL.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "RL-based Post-training in LLMs and MLLMs. Reinforcement learning (RL) has emerged as a pivotal paradigm for refining LLMs through alignment with human preferences and task-specific objectives. Prominent approaches like Reinforcement Learning from Human Feedback (RLHF) [46] and Direct Preference Optimization (DPO) [50] have demonstrated remarkable success in enhancing safety, coherence, and instruction-following capabilities of LLMs [43, 47, 44] and MLLMs [74, 60]. Recently, rule-based RL techniques, represented by GRPO [55], have demonstrated the potential for large-scale RL applications. LLMs have officially entered the era of strongly reasoning models. Subsequently, MLLMs [8, 39, 41] have also quickly followed this technology. However, so far, there has been no exciting, true \"Aha Moment\" in the multimodal domain. This study aims to investigate the potential contributions of RL to multimodal models, focusing on visual perception.", + "bbox": [ + 169, + 138, + 826, + 280 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Preliminaries", + "text_level": 1, + "bbox": [ + 171, + 299, + 318, + 314 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Perception Policy Definition. 
The goal of perception policy in visual-language context is enabling the model to first $(i)$ extract and understand visual information from the environment [37, 68], then $(ii)$ perform logical reasoning based on this understanding [73, 70] to $(iii)$ accomplish specific tasks and further interact with the environment [5, 22]. In this work, we aim to empower the model to deal with a series of pure visual, e.g., counting, detection, and visual-language, e.g., grounding, optical character recognition (OCR), tasks through perception policy learning.", + "bbox": [ + 169, + 330, + 823, + 416 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Group Relative Policy Optimization (GRPO [55]) is a rule-based reinforcement learning algorithm tailored for post-training LLMs. Its core idea is to use group relative rewards to optimize the policy, eliminating the need for a separate critic model [54]. Specifically, GRPO samples multiple outputs $(\\mathbf{o}_1 \\sim \\mathbf{o}_{\\mathbf{g}}$ in Figure 1) from the old policy for the same input, calculates the average reward of these outputs as the baseline, and uses the relative rewards to guide policy updates. 
The optimization objective of GRPO can be formulated as following:", + "bbox": [ + 169, + 421, + 825, + 505 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {J} _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {[ q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\text {o l d}}} (O | q) ]}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 535, + 450, + 553 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\sum_ {t = 1} ^ {| o _ {i} |} \\left\\{\\min \\left[ \\frac {\\pi_ {\\theta} ^ {i , t}}{\\pi_ {\\theta_ {\\mathrm {o l d}}} ^ {i , t}} \\hat {A} _ {i, t}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} ^ {i , t}}{\\pi_ {\\theta_ {\\mathrm {o l d}}} ^ {i , t}}, 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i, t} \\right] - \\beta \\mathbb {D} _ {\\mathrm {K L}} [ \\pi_ {\\theta} \\| \\pi_ {\\mathrm {r e f}} ] \\right\\},\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 556, + 810, + 599 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {D} _ {\\mathrm {K L}} \\left[ \\pi_ {\\theta} \\| \\pi_ {\\text {r e f}} \\right] = \\frac {\\pi_ {\\text {r e f}} \\left(o _ {i , t} | q , o _ {i , < t}\\right)}{\\pi_ {\\theta} \\left(o _ {i , t} | q , o _ {i , < t}\\right)} - \\log \\frac {\\pi_ {\\text {r e f}} \\left(o _ {i , t} | q , o _ {i , < t}\\right)}{\\pi_ {\\theta} \\left(o _ {i , t} | q , o _ {i , < t}\\right)} - 1, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 619, + 823, + 652 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\epsilon$ and $\\beta$ are hyper-parameters, and $\\hat{A}_{i,t}$ is the advantage, computed using a group of rewards $\\{r_1,r_2,\\dots ,r_G\\}$ corresponding to the outputs within each group. 
Refer to [12, 55] for more details.", + "bbox": [ + 171, + 666, + 826, + 696 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4 Perception-R1", + "text_level": 1, + "bbox": [ + 171, + 715, + 323, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In a nutshell, our Perception-R1 applies the rule-based RL algorithm GRPO [55] to the post-training stage of MLLM and optimizes the reward modeling to support perception policy learning. Figure 1 illustrates the idea, more approach and implementation details introduced next.", + "bbox": [ + 169, + 748, + 823, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1 Rule-based Reward Modeling", + "text_level": 1, + "bbox": [ + 169, + 808, + 421, + 824 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The reward function serves as the principal training signal in reinforcement learning (RL), directing the optimization process. Existing LLM methods [12, 57, 24] basically apply a highly resilient, rule-based reward system consisting of only two reward types: Format Reward and Answer Reward.", + "bbox": [ + 169, + 835, + 826, + 878 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Format Reward. In existing LLM and MLLM, the output format is comprised of two essential components: the final output format and the intermediate reasoning process format. The reward for", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/cb9aa06cba656d51b4608fdf2dbc637b5aeafe0c9ddd151a6164daf19bf44f99.jpg", + "image_caption": [ + "Figure 1: Illustration of Perception-R1 framework. Following DeepSeek-R1 [12], we prompt MLLM model to generate several rollout responses and apply GRPO [55] during post-training stage." 
+ ], + "image_footnote": [], + "bbox": [ + 194, + 85, + 803, + 262 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the final output is defined in accordance with specific task requirements and is typically encapsulated within `` tags, whereas the reward for the intermediate reasoning process generally mandates that the reasoning steps be enclosed within `` tags. Formally,", + "bbox": [ + 169, + 325, + 826, + 369 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {f o r m a t}} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f f o r m a t i s c o r r e c t} \\\\ - 1, & \\text {i f f o r m a t i s i n c o r r e c t} \\end{array} \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 393, + 825, + 428 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Perception-R1, we follow this setting. A subtle difference emerges that visual perception task frequently requires the output of object coordinates, e.g., bounding box, lines, or points. Consequently, the output format must be strictly constrained to the $[x1, y1, x2, y2]$ structure.", + "bbox": [ + 169, + 441, + 826, + 484 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Answer Reward. The Answer Reward pertains to the correctness of model-generated responses, serving as a central consideration in reward design. Typically, outputs from language models are abstract and semantically rich, requiring validation through external mechanisms such as code-based ADE [12] or mathematical answer verification [55]. In contrast, visual perception tasks benefit from clearly defined physical ground truths, which simplify the development of a robust reward function.", + "bbox": [ + 169, + 489, + 825, + 560 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Perception-R1 diverges from LLM approaches by anchoring the reward mechanism in visual discrimination. 
This departure is pivotal, as it replaces the often implicit and subjective feedback mechanisms typical of language models with an explicit, quantifiable metric. Formally, discriminative reward $r_i$ can be represented as:", + "bbox": [ + 169, + 565, + 828, + 622 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nr _ {i} = \\Phi \\left(o _ {i}, z\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 447, + 651, + 825, + 667 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\Phi(\\cdot)$ indicates the discriminative function, for example, IoU for bounding box and euclidean distance for point. By leveraging visual discrimination, we provide the model with a clear and objective feedback signal, ensuring the model's policy update with precise measured margin.", + "bbox": [ + 169, + 681, + 825, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2 Multi-Subject Reward Matching", + "text_level": 1, + "bbox": [ + 171, + 739, + 441, + 757 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In natural environments, physical objects rarely appear in isolation and instead frequently co-occur in groups. This inherent complexity gives rise to a challenge we define as reward matching, which entails aligning the model's output with the corresponding ground truth before reward computation. Specifically, when prompting the model to predict the attributes of multiple subjects within an image, e.g., points and bounding box, it is necessary to determine the appropriate ground truth reference for each subject to ensure accurate reward assignment.", + "bbox": [ + 169, + 766, + 826, + 851 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Formally, let $y = \\{y_{i}\\}_{i=1}^{N}$ denote the set of predicted attributes for $N$ subjects, and let $z = \\{z_{j}\\}_{j=1}^{M}$ represent the corresponding ground truth attributes. 
We model the reward matching problem as a bipartite graph matching task, where one set of nodes corresponds to predictions and the other to ground truths. The edge weight between a prediction $y_{i}$ and a ground truth $t_{j}$ is determined by the", + "bbox": [ + 169, + 854, + 825, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "reward function $\\Phi(y_i, z_j)$ defined in Eq. 3, which measures their similarity or compatibility. The objective is to find the optimal assignment that maximizes the total reward:", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\sigma} = \\underset {\\sigma \\in \\Omega_ {N}} {\\arg \\max } \\sum_ {i = 1} ^ {N} \\Phi (y _ {i}, z _ {\\sigma (i)}), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 392, + 142, + 825, + 184 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\Omega_N$ is the set of all valid assignments between predictions and ground truths. To solve this optimization problem efficiently, we employ the Hungarian algorithm [27], a well-established method for bipartite graph matching that guarantees the optimal pairing by maximizing the overall reward (or equivalently, minimizing the cost). This ensures that each predicted attribute is accurately matched with its corresponding ground truth, thereby optimizing the reward computation process.", + "bbox": [ + 169, + 191, + 823, + 266 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After the optimal reward assignment is determined, we calculate the answer reward by aggregating the individual rewards for each subject. 
Mathematically, the overall reward score is defined as:", + "bbox": [ + 169, + 268, + 823, + 297 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {a n s w e r}} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\Phi (y _ {i}, z _ {\\hat {\\sigma} (i)}), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 319, + 825, + 359 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {t o t a l}} = S _ {\\text {f o r m a t}} + S _ {\\text {a n s w e r}}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 362, + 557, + 378 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\hat{\\sigma}$ is the optimal assignment obtained via the Hungarian algorithm. In Perception-R1, we primarily use reward matching for visual counting and object detection tasks, as these involve multiple objects.", + "bbox": [ + 169, + 388, + 823, + 433 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3 Perception-R1 Configuration", + "text_level": 1, + "bbox": [ + 171, + 446, + 415, + 462 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Model Setting. Our model implementation follows Qwen2-VL [61]. We mainly use the Qwen2-VL-Instruct-2B as the baseline model. We also utilize Qwen2.5-VL-3B-Instruct [3] for training object detection tasks, due to its specialized optimization for localizing bounding boxes. The input image resolution for Qwen2-VL is dynamic cooperated with 2D-RoPE [56].", + "bbox": [ + 169, + 472, + 825, + 529 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Task and Data Setting. Given that Perception-R1 is primarily oriented towards pure visual and visual-language tasks, we select several mainstream and representative downstream tasks for perception policy learning, specifically including visual grounding, e.g., refCOCO [71] / + [71] / g [40], OCR, i.e., PageOCR [34], visual counting, i.e., Pixmo-Count [13], and object detection, i.e., COCO2017 [32]. 
For each task, a subset $(5k\\sim 10k)$ of samples are respectively extracted as base data for individual post-training. More details are in appendix A.1.", + "bbox": [ + 169, + 534, + 826, + 619 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training Setting. We focus on the RL-based post-training stage of MLLM. All the selected base models have already undergone pre-training and SFT stage. During RL stage, the initial learning rate is set as $1e - 6$ with 8 rollouts by default and a batch size of 1. The following are some important hyper-parameters during post-training. Prompts detailed settings are in the appendix A.1.", + "bbox": [ + 169, + 623, + 823, + 680 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/89c30d1304cdad26e2e739586527ddda8f446e389ea70502641287d4b639d78e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Gradient AccumulationRollout GKL CoefficientMax Response LenTemperature
280.0420481.0
", + "bbox": [ + 181, + 686, + 815, + 715 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reward Setting. We tailor distinct discriminative rewards for various visual perception tasks. For the grounding task, the reward is based on the Intersection over Union (IoU) between the predicted output and the ground truth. In the counting task, we adopt a paradigm similar to Qwen2.5-VL, which first detects points and then counts them. Here, the reward is derived from the Euclidean distance computed during reward matching. For OCR, the edit distance serves as the primary reward metric. Lastly, in object detection, we combine multiple rewards: an object number reward based on the F1 score, a location reward using IoU, and a binary classification reward with a missing penalty.", + "bbox": [ + 169, + 724, + 826, + 823 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Sampling Setting. Following Kimi-1.5 [57], we adopt a curriculum sampling strategy that begins with easier data and gradually transitions to more challenging examples. Specifically, for the object detection task, we first conduct offline training on the COCO dataset to compute reward values. Based on the selected rewards, i.e., number reward, we partition the dataset accordingly. As training advances, we progressively replace the data with more difficult samples (i.e., those associated with lower rewards) while concurrently increasing the rollout to broaden the model's exploration space.", + "bbox": [ + 169, + 827, + 826, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/e44a87e5b4cbe43f61787ce03abac0772efea141fb0986021b5a52e9dcc58f20.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
methodsizeRefCOCO
val@50testA@50testB@50val@75testA@75testB@75val@95testA@95testB@95valAvgtestAAvg
MDETR [25]-87.590.482.6--------
OFA [62]-88.490.683.3--------
LLaVA-1.5 [35]7B49.154.943.310.713.66.90.40.30.320.122.9
LLaVA-NeXT [36]7B82.588.474.045.754.835.61.92.60.743.448.6
LLaVA-OV [28]7B73.082.363.524.229.615.90.50.50.532.637.5
Qwen2-VL [61]2B86.889.682.077.280.670.133.035.726.965.768.6
Perception-R12B89.191.484.579.583.672.435.038.528.867.971.2
RefCOCO+
methodsizeval@50testA@50testB@50val@75testA@75testB@75val@95testA@95testB@95valAvgtestAAvg
MDETR [25]-81.185.572.9--------
OFA [62]-81.387.174.2--------
LLaVA-1.5 [35]7B42.449.736.49.812.46.40.50.50.217.620.8
LLaVA-NeXT [36]7B74.584.064.741.551.830.01.92.71.039.346.2
LLaVA-OV [28]7B65.879.057.223.628.815.30.60.60.430.036.1
Qwen2-VL [61]2B77.182.570.168.773.860.029.432.323.058.462.9
Perception-R12B81.786.874.373.679.364.232.636.926.762.667.7
RefCOCOg
methodsizeval@50test@50val@75test@75val@95test@95valAvgtestAvg
MDETR [25]-83.383.3------
OFA [62]-82.282.3------
LLaVA-1.5 [35]7B43.245.18.59.30.30.317.318.2
LLaVA-NeXT [36]7B77.577.140.739.91.81.740.039.6
LLaVA-OV [28]7B70.870.823.323.60.60.731.631.7
Qwen2-VL [61]2B83.383.172.773.028.927.961.661.3
Perception-R12B85.785.475.776.032.133.164.564.8
", + "bbox": [ + 178, + 87, + 815, + 484 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/568578d2d46e7653152d99d920d5ccffedb75b5abfb947b17fdb84cd37721d67.jpg", + "table_caption": [ + "Table 1: Visual grounding benchmark evaluation. To comprehensively assess the model's grounding capability, we select referring expression comprehension (REC) benchmark, i.e., RefCOCO [71], RefCOCO+[71], and RefCOCOg[40] for evaluation. The expert model is denoted in gray." + ], + "table_footnote": [], + "table_body": "
sizeEdit Distance ↓F1-score ↑Precision ↑Recall ↑BLEU ↑METEOR ↑
enzhenzhenzhenzhenzhenzh
Nougat [4]250M25.5-74.5-72.0-80.9-66.5-76.1-
DocOwl1.5 [23]7B25.8-86.2-83.5-96.2-78.8-85.8-
GOT [65]580M3.53.897.298.097.198.297.397.894.787.895.893.9
Qwen2-VL [61]2B8.010.094.493.096.996.193.090.590.978.094.187.2
LLaVA-NeXT [36]7B43.0-64.7-57.3-88.1-47.8-58.2-
Perception-R12B3.59.098.294.498.696.397.892.796.774.698.188.9
", + "bbox": [ + 173, + 537, + 820, + 650 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: PageOCR evaluation, compared with various strong expert and general models. \"en\" means English and \"zh\" means Chinese.", + "bbox": [ + 169, + 655, + 823, + 683 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Experiments", + "text_level": 1, + "bbox": [ + 171, + 691, + 313, + 709 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The experimental section evaluates Perception-R1's performance on visual perception tasks (§ 5.1), followed by analytical experiments exploring reinforcement learning (RL)'s role in perception policy learning (§ 5.2). Finally, it discusses the interplay between visual perception and RL, along with key insights for perception policy learning (§ 5.3).", + "bbox": [ + 169, + 723, + 826, + 780 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Performance Landscape in Perception Tasks", + "text_level": 1, + "bbox": [ + 171, + 796, + 522, + 811 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate Perception-R1 on mainstream perception tasks: visual grounding, counting, OCR, and object detection. Experiments use the datasets described in § 4.3 and benchmarks for image understanding. Results are in Tables 1-4. See Appendix A.2 for details.", + "bbox": [ + 169, + 821, + 825, + 864 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Visual Grounding is a task that involves localizing visual objects based on linguistic descriptions. Specifically, given a language prompt, the model is required to output the spatial coordinates of the subject (typically a single entity) described in the prompt. 
As shown in Table 1, we evaluate", + "bbox": [ + 169, + 869, + 826, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/22e55b7f6a2c932d951d78c5fb9270f67a7affd9a4fdf14064edc50383995f6b.jpg", + "table_caption": [], + "table_footnote": [ + "(a) Visual counting evaluation on Pixmo-Count [13] \nval set and test set." + ], + "table_body": "
methodsizeVisual Counting
PixmovalPixmotoetest
LLaVA-1.5 [35]7B33.331.0
LLaVA-1.6 [58]7B32.731.9
LLaVA-OV [28]7B55.853.7
Qwen2-VL [61]2B60.250.5
Perception-R12B78.175.6
", + "bbox": [ + 178, + 89, + 486, + 186 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/67a5388ba51edf551e5fad1bcf4c017c24711e7b0aff42cdea97dc8a11150861.jpg", + "table_caption": [], + "table_footnote": [ + "(b) Object detection evaluation on COCO2017 [32] validation set." + ], + "table_body": "
methodsizeepochObject Detection
AP\\( AP_{50} \\)\\( AP_{75} \\)
YOLOv3 [51]-27327.949.228.3
Faster-RCNN [52]-1235.655.737.9
DETR [6]41M50042.062.444.2
Qwen2.5-VL [3]3B116.123.716.7
Perception-R1†3B131.946.733.4
", + "bbox": [ + 504, + 89, + 821, + 186 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6b1dde97d407fde40d64c3c974e5631c46e7f989a63f11f79ab2852f4c146256.jpg", + "table_caption": [ + "Table 3: Mainstream visual tasks evaluation including (a) visual object counting and (b) challenging general object detection. Notably, the results of expert model in (b) are copied from MMDetection [7]. $\\dagger$ means Perception-R1 for object detection is build based on Qwen2.5-VL-3B-Instruct [3]." + ], + "table_footnote": [], + "table_body": "
llmMMBenchMMVetMMStar ScienceQASeedBenchMMELLaVA-BenchAI2D
AvgAvgAvgAvgAvgAvgAvgCognitionPerceptionAvgAvg
LLaVA1.5 [35]Vicuna1.5-7B62.832.832.665.460.1302.11338.352.651.9
LLaVA-NeXT [36]Vicuna1.5-7B66.037.937.768.269.1195.71419.552.767.4
Qwen2-VL [61]Qwen2-2B71.945.646.374.072.7418.51471.146.571.6
Perception-R1Qwen2-2B71.848.945.773.473.0430.01473.958.271.8
", + "bbox": [ + 174, + 270, + 818, + 357 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4: General image understanding and reasoning evaluation, compared with various baselines. We select 8 mainstream multimodal benchmarks, i.e., MMBench [38], MMVet [72], MMStar [9], ScienceQA [53], SeedBench [18], MME [16], LLaVA-Bench [37], and ai2D [26] for the comprehensive understanding. We use the model after RL training in the counting tasks for the eval.", + "bbox": [ + 169, + 362, + 826, + 417 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Perception-R1 on three mainstream benchmarks, refCOCO / + / g, and report Acc@0.5, Acc@0.75, and Acc@0.95 to comprehensively assess its visual grounding capability. We surprisingly find that several SoTA MLLMs exhibit poor performance on the more challenging Acc@0.95 metric, with scores even below 1%. In contrast, Perception-R1 achieves a stable performance of over 30% on this metric. This observation suggests that the community should prioritize reporting more discriminative results in future evaluations. The experimental results demonstrate that Perception-R1 exhibits strong competitiveness compared to both specialized and general-purpose models.", + "bbox": [ + 169, + 435, + 823, + 532 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Optical Character Recognition (OCR) represents a critical task in visual perception due to its substantial practical value. Current methodologies predominantly adopt either expert models or fine-tuned generalist models for OCR. Perception-R1 pioneers the utilization of RL to further unlock the OCR capabilities of MLLM. As shown in Table 2, our proposed Perception-R1 achieves SoTA performance on the highly challenging OCR benchmark, i.e., PageOCR [34], demonstrating significant superiority over existing expert models, e.g., GOT (98.1 vs. 97.2 F1-score) and robust generalist models, e.g., LLaVA-NeXT (98.1 vs. 64.7 F1-score). 
Notably, Perception-R1 does not use the Chinese OCR data for training so it is a zero-shot performance for Chinese metric. This breakthrough substantiates the formidable potential of RL applications in OCR tasks, establishing new frontiers for enhancing textual understanding and recognition in complex visual environments.", + "bbox": [ + 169, + 537, + 826, + 678 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Visual Counting, as a fundamental vision task, necessitates models to accurately quantify category-specific instances within images, requiring robust visual logic to identify and enumerate targets through structured recognition patterns. In Perception-R1, we adopt a detect-then-count paradigm that reformulates the counting problem into a point detection process. As shown in Table 3a, Perception-R1 achieves remarkable counting performance, surpassing the current strong baselines by a substantial margin (17.9% improvement compared to Qwen2-VL in Pixmo val set). This advancement substantiates that RL effectively stimulates models to explore intrinsic visual logic mechanisms (Although counting yields deterministic results, the sequence of counting can exhibit distinct patterns.), thereby enhancing their capacity to resolve complex vision tasks.", + "bbox": [ + 169, + 683, + 826, + 809 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "General Object Detection, widely regarded as the crown jewel of computer vision tasks, has long been considered one of the most challenging problems in visual perception. As a pioneering endeavor to integrate RL into object detection, Perception-R1 achieves a groundbreaking milestone, serving as the first pure MLLM to surpass the $30+$ AP threshold, i.e., 31.9 AP in Table 3b, on the COCO 2017 val set, matching or even exceeding the performance of specialized expert models. 
This achievement underscores rule-based RL's immense potential in addressing complex vision tasks requiring sophisticated visual-logic integration.", + "bbox": [ + 169, + 814, + 826, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b7035c312ed2b32280d4a48e901452b0da27d5e28e8481473c4d5374ca58c5bf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
caseVisual GroundingOCR PageOCRVisual CountingDetection COCO2017
RefCOCORefCOCO+RefCOCOgPixmovalPixmotest
Perception-R189.181.785.798.478.175.631.9
w/o reward matching----77.175.423.5
w/o RL86.877.183.394.460.250.516.1
w thinking75.167.971.377.374.972.825.7
w/o thinking89.181.785.795.778.175.628.1
RL only89.181.785.795.778.175.631.9
SFT only88.280.784.695.358.059.925.9
SFT+RL88.480.785.197.377.175.430.8
", + "bbox": [ + 200, + 87, + 794, + 218 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/17e909db05a21591ec24e42bc0b1d2eb629c10109a7cd0731914d981373ef4e4.jpg", + "table_caption": [ + "Table 5: Ablation Study of Perception-R1. We perform ablation studies to investigate key properties of Perception-R1 across a range of visual perception tasks. Specifically, we report the Acc@0.5 for RefCOCO / + / g val set, the F1-score for PageOCR, the average scores for Pixmo-Count, and the AP metric for COCO2017 val set. w/o means without. Notably, there is no reward matching applied to visual grounding and OCR tasks, as these tasks do not involve the multi-subject reward." + ], + "table_footnote": [], + "table_body": "
reward functionCOCO2017
AP\\( AP_{50} \\)\\( AP_{75} \\)
format reward---
format reward + location reward (IoU)18.825.320.1
format reward + location reward (IoU) + cls reward20.227.321.4
format reward + location reward (IoU) + cls reward + recall reward (F1)27.642.028.7
format reward + location reward (IoU) + cls reward + recall reward (F1) + missing reward28.142.029.6
", + "bbox": [ + 173, + 291, + 823, + 392 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 6: Reward design analysis of Perception-R1. pls reward indicates binary classification reward and missing reward is a penalty to penalize missed detections. To facilitate rapid experimentation, we randomly sampled 10k data from COCO2017 train set for this experiment.", + "bbox": [ + 169, + 395, + 823, + 436 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "General Visual Comprehension extends beyond pure perceptual tasks, and we evaluate Perception-R1 on multiple multimodal benchmarks. As shown in Table 4, we observe an intriguing phenomenon that models trained with RL for vision-specific tasks, e.g., counting task, exhibit concurrent performance gains in generic comprehension benchmarks. We attribute this cross-task enhancement to the perception policy learning, which drives the model to discover superior image interpretation patterns.", + "bbox": [ + 169, + 444, + 826, + 513 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 Ablation Study of Perception-R1", + "text_level": 1, + "bbox": [ + 171, + 532, + 439, + 547 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we aim to conduct a comprehensive ablation study to systematically investigate the contributions of critical components within Perception-R1. Experimental results are shown in Table 5. From the experimental results, we can derive three principal empirical findings:", + "bbox": [ + 169, + 559, + 826, + 602 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reward matching enhances the explorability of multi-subject visual perception. As evidenced by the comparative results between row 1 and 2 in Table 5, replacing the bipartite matching with sequential matching leads to substantial performance degradation in both visual counting and object detection task. This suggests that sequential matching constrains the RL exploration space. 
On the contrast, the bipartite matching mechanism provides more possibility in reward assignment, enabling the model to explore optimal visual perception patterns.", + "bbox": [ + 169, + 607, + 823, + 691 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Explicit thinking processes prove non-essential for contemporary visual perception. Comparative analysis of row 3 and 4 reveals consistent performance degradation across all four evaluated perception tasks when incorporating an explicit thinking process during both training and inference phases. Similar phenomenon also emerges in image classification tasks [30]. We posit that this phenomenon arises because current visual perception tasks are more oriented toward visual logic rather than semantic logic. This shift implies that explicit language-centered reasoning processes are unnecessary, as models tend to focus more on learning implicit visual patterns.", + "bbox": [ + 169, + 696, + 826, + 794 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Perceptual perplexity determines RL superiority over SFT. We compare the different combinations of post-training method, i.e., SFT, RL, and $\\mathrm{SFT + RL}$ , across four perception tasks, as shown in row 6, 7, 8 of Table 5. In tasks with high perceptual perplexity, such as counting and detection (multiple objects and categories), RL demonstrates superior performance enhancement compared to SFT or even $\\mathrm{SFT + RL}$ . Conversely, in low-perplexity tasks such as grounding and OCR, RL underperforms relative to SFT or $\\mathrm{SFT + RL}$ . This indicates that high perceptual perplexity a significant factor influencing the effectiveness of RL. 
It suggests that RL techniques should be applied to tasks with greater perceptual perplexity, where the exploration space for perception policy is larger.", + "bbox": [ + 169, + 800, + 826, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3053b8e6241b40acdbecf83ae363a7f83ebec9ed84048e20cc6e311d938803bd.jpg", + "image_caption": [ + "(a) Grounding reward" + ], + "image_footnote": [], + "bbox": [ + 181, + 88, + 331, + 204 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f92468a49b9c1c8ca4893b7a61fdc713f5cfb6c948a614ddf22f1ec900d85de3.jpg", + "image_caption": [ + "(b) Grounding performance" + ], + "image_footnote": [], + "bbox": [ + 341, + 88, + 495, + 203 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/aee8ba28fe17acb09471504630258562d6034bed74d6104b513bf8b4ff85240c.jpg", + "image_caption": [ + "(c) Counting reward", + "Figure 2: Scalability analysis of Perception-R1. We select two primary tasks: grounding and counting. We visualize the training reward curves under varying numbers of rollouts and evaluate the final performance of each task. All experiments are conducted with $5k$ sampled data. And the default rollout number setting $(1\\times)$ is 8." 
+ ], + "image_footnote": [], + "bbox": [ + 504, + 89, + 651, + 204 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/346f636339e1da4901dd34bcfb41c75b7043da12f645cc487144f19a5e0a4dec.jpg", + "image_caption": [ + "(d) Counting performance" + ], + "image_footnote": [], + "bbox": [ + 663, + 89, + 815, + 203 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.3 More In-depth Analysis", + "text_level": 1, + "bbox": [ + 171, + 305, + 379, + 320 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this section, we explore several key properties of Perception-R1 to further enhance our understanding of Perception Policy Learning with RL.", + "bbox": [ + 169, + 335, + 825, + 364 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Analysis of reward design for perception policy learning. We introduced the details of reward function of Perception-R1 in § 4.3. In this part, we examine the influence of these reward functions on perception policy learning. Specifically, using object detection as a case study, we incrementally integrate the designed answer reward into the format reward, as illustrated in Table 6. The results indicate that the progressive introduction of refined reward functions leads to consistent improvements in detection performance, ultimately exceeding the performance of expert models. This underscores the critical role of reward design in perception policy learning. Furthermore, it identifies a promising avenue for future research: the development of more refined and task-specific reward functions to enhance perception policy learning.", + "bbox": [ + 169, + 369, + 823, + 494 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Analysis of scaling up rollout for perception policy learning. The scalability of RL is a key concern of existing LLM post-training. In this part, we analyze the scalability of Perception-R1, focusing specifically on scaling up the number of rollouts. 
As shown in Figure 2, we conduct rollout-scaling experiments in two tasks: visual grounding and visual counting. The results indicate that increasing rollout count enhances reward optimization and final performance. This demonstrates Perception-R1's strong scaling properties and underscores the critical role of rollout quantity in scaling perception policies. By generating sufficient rollouts, the model broadens its exploration space, increasing the diversity of candidate solutions for reward evaluation. This expansion accelerates convergence to optimal visual perception patterns.", + "bbox": [ + 169, + 501, + 823, + 626 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Limitation and Conclusion", + "text_level": 1, + "bbox": [ + 171, + 657, + 429, + 674 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "\"What can RL bring to MLLM?\" is a public question since the propose of DeepSeek-R1. Several latest works attempt to apply RL from the perspective of language-centric visual reasoning [39, 15, 41]. However, in this paper, we take a different pathway and argue that perception is a crucial prerequisite for visual reasoning. Only by fully unlocking the perception patterns of MLLMs can the models possess the ability to reason about complex visual tasks. Nevertheless, we regrettably find that many current perception tasks are overly simplistic, which limits the exploration space for RL. This, in turn, restricts the possibility of MLLMs achieving a perceptual \"Aha moment\" through thinking process. Finding more appropriate perception tasks, aka., meta task, may be the key to addressing this issue.", + "bbox": [ + 169, + 696, + 826, + 809 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In a summary, this work takes a pioneering step in exploring the potential of rule-based RL in MLLM post-training for perception policy learning. 
Through extensive experimental analysis, we establish several valuable cognition about perception policy learning with RL. Driven by these findings, we build Perception-R1, a simple, effective, and scalable RL framework for efficient perception policy learning. Perception-R1 sets new SoTAs across multiple visual perception tasks, particularly in object detection tasks. By introducing a novel paradigm, it achieves and even surpasses the performance of expert models, thereby demonstrating the significant potential of perception policy learning.", + "bbox": [ + 169, + 814, + 823, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 104 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023.", + "[2] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023.", + "[3] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025.", + "[4] Lukas Blecher, Guillem Cucurull, Thomas Scialom, and Robert Stojnic. Nougat: Neural optical understanding for academic documents. arXiv preprint arXiv:2308.13418, 2023.", + "[5] Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski, Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, et al. Rt-2: Vision-language-action models transfer web knowledge to robotic control. 
arXiv preprint arXiv:2307.15818, 2023.", + "[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020.", + "[7] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, Zheng Zhang, Dazhi Cheng, Chenchen Zhu, Tianheng Cheng, Qijie Zhao, Buyu Li, Xin Lu, Rui Zhu, Yue Wu, Jifeng Dai, Jingdong Wang, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. MMDetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019.", + "[8] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025.", + "[9] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024.", + "[10] Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025.", + "[11] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale N Fung, and Steven Hoi. Instructclip: Towards general-purpose vision-language models with instruction tuning. Advances in Neural Information Processing Systems, 36, 2024.", + "[12] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. 
Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun" + ], + "bbox": [ + 173, + 112, + 826, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. 
Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yaohui Li, Yi Zheng, Yuchen Zhu, Yunxian Ma, Ying Tang, Yukun Zha, Yuting Yan, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhengyan Zhang, Zhewen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, Zilin Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025.", + "[13] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024.", + "[14] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, Xiangwen Kong, Xiangyu Zhang, Kaisheng Ma, and Li Yi. DreamLLM: Synergistic multimodal comprehension and creation. In The Twelfth International Conference on Learning Representations, 2024.", + "[15] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025.", + "[16] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Zhenyu Qiu, Wei Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models. 
arXiv preprint arXiv:2306.13394, 2023.", + "[17] Zitian Gao, Boye Niu, Xuzheng He, Haotian Xu, Hongzhang Liu, Aiwei Liu, Xuming Hu, and Lijie Wen. Interpretable contrastive monte carlo tree search reasoning, 2024.", + "[18] Yuying Ge, Sijie Zhao, Ziyun Zeng, Yixiao Ge, Chen Li, Xintao Wang, and Ying Shan. Making llama see and draw with seed tokenizer. arXiv preprint arXiv:2310.01218, 2023.", + "[19] GPT-4o. Hello gpt-4o, 2024.", + "[20] Kaiming He, Georgia Gkioxari, Piotr Dólár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017.", + "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016.", + "[22] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, et al. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14281-14290, 2024.", + "[23] Anwen Hu, Haiyang Xu, Jiabo Ye, Ming Yan, Liang Zhang, Bo Zhang, Chen Li, Ji Zhang, Qin Jin, Fei Huang, et al. mplug-docowl 1.5: Unified structure learning forocr-free document understanding. arXiv preprint arXiv:2403.12895, 2024.", + "[24] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, 2025." + ], + "bbox": [ + 171, + 90, + 826, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[25] Aishwarya Kamath, Mannat Singh, Yann LeCun, Gabriel Synnaeve, Ishan Misra, and Nicolas Carion. 
Mdetr-modulated detection for end-to-end multi-modal understanding. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1780-1790, 2021.", + "[26] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part IV 14, pages 235-251. Springer, 2016.", + "[27] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97, 1955.", + "[28] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024.", + "[29] Jinyang Li, En Yu, Sijia Chen, and Wenbing Tao. Ovtr: End-to-end open-vocabulary multiple object tracking with transformer. arXiv preprint arXiv:2503.10616, 2025.", + "[30] Ming Li, Shitian Zhao, Jike Zhong, Yuxiang Lai, and Kaipeng Zhang. Cls-rl: Image classification with rule-based reinforcement learning. arXiv preprint arXiv:2503.16188, 2025.", + "[31] Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023.", + "[32] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014.", + "[33] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. 
arXiv preprint arXiv:2412.19437, 2024.", + "[34] Chenglong Liu, Haoran Wei, Jinyue Chen, Lingyu Kong, Zheng Ge, Zining Zhu, Liang Zhao, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Focus anywhere for fine-grained multi-page document understanding. arXiv preprint arXiv:2405.14295, 2024.", + "[35] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024.", + "[36] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, January 2024.", + "[37] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024.", + "[38] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023.", + "[39] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025.", + "[40] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11–20, 2016.", + "[41] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025." 
+ ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[42] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025.", + "[43] OpenAI. Chatgpt. https://openai.com/blog/chatgpt, 2022.", + "[44] OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.", + "[45] OpenAI. Learning to reason with llms, September 2024.", + "[46] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022.", + "[47] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35:27730-27744, 2022.", + "[48] Yuang Peng, Yuxin Cui, Haomiao Tang, Zekun Qi, Runpei Dong, Jing Bai, Chunrui Han, Zheng Ge, Xiangyu Zhang, and Shu-Tao Xia. Dreambench++: A human-aligned benchmark for personalized image generation. arXiv preprint arXiv:2406.16855, 2024.", + "[49] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.", + "[50] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 
Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36, 2024.", + "[51] Joseph Redmon and Ali Farhadi. Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767, 2018.", + "[52] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE transactions on pattern analysis and machine intelligence, 39(6):1137-1149, 2016.", + "[53] Tanik Saikh, Tirthankar Ghosal, Amish Mittal, Asif Ekbal, and Pushpak Bhattacharyya. Scienceqa: A novel resource for question answering on scholarly articles. International Journal on Digital Libraries, 23(3):289-301, 2022.", + "[54] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.", + "[55] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "[56] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024.", + "[57] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025.", + "[58] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. 
arXiv preprint arXiv:2406.16860, 2024.", + "[59] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + ], + "bbox": [ + 171, + 90, + 826, + 911 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[60] Fei Wang, Wenxuan Zhou, James Y Huang, Nan Xu, Sheng Zhang, Hoifung Poon, and Muhao Chen. mdpo: Conditional preference optimization for multimodal large language models. arXiv preprint arXiv:2406.11839, 2024.", + "[61] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024.", + "[62] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International conference on machine learning, pages 23318-23340. PMLR, 2022.", + "[63] Haoran Wei, Lingyu Kong, Jinyue Chen, Liang Zhao, Zheng Ge, Jinrong Yang, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Vary: Scaling up the vision vocabulary for large vision-language model. In European Conference on Computer Vision, pages 408-424. Springer, 2024.", + "[64] Haoran Wei, Lingyu Kong, Jinyue Chen, Liang Zhao, Zheng Ge, En Yu, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Small language model meets with reinforced vision vocabulary. arXiv preprint arXiv:2401.12503, 2024.", + "[65] Haoran Wei, Chenglong Liu, Jinyue Chen, Jia Wang, Lingyu Kong, Yanming Xu, Zheng Ge, Liang Zhao, Jianjian Sun, Yuang Peng, et al. 
GeneralOCR theory: TowardsOCR-2.0 via a unified end-to-end model. arXiv preprint arXiv:2409.01704, 2024.", + "[66] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022.", + "[67] Huajian Xin, Z. Z. Ren, Junxiao Song, Zhihong Shao, Wanjia Zhao, Haocheng Wang, Bo Liu, Liyue Zhang, Xuan Lu, Qiushi Du, Wenjun Gao, Qihao Zhu, Dejian Yang, Zhibin Gou, Z. F. Wu, Fuli Luo, and Chong Ruan. Deepseek-prover-v1.5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search, 2024.", + "[68] En Yu, Kangheng Lin, Liang Zhao, Yana Wei, Zining Zhu, Haoran Wei, Jianjian Sun, Zheng Ge, Xiangyu Zhang, Jingyu Wang, et al. Unhackable temporal rewarding for scalable video mllms. arXiv preprint arXiv:2502.12081, 2025.", + "[69] En Yu, Tiancai Wang, Zhuoling Li, Yang Zhang, Xiangyu Zhang, and Wenbing Tao. Motrv3: Releasefetch supervision for end-to-end multi-object tracking. arXiv preprint arXiv:2305.14298, 2023.", + "[70] En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589, 2023.", + "[71] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 69-85. Springer, 2016.", + "[72] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. 
arXiv preprint arXiv:2308.02490, 2023.", + "[73] Liang Zhao, En Yu, Zheng Ge, Jinrong Yang, Haoran Wei, Hongyu Zhou, Jianjian Sun, Huang Peng, Runpei Dong, Chunrui Han, et al. Chatspot: Bootstrapping multimodal llms via precise referring instruction tuning. arXiv preprint arXiv:2307.09474, 2023.", + "[74] Zining Zhu, Liang Zhao, Kangheng Lin, Jinze Yang, En Yu, Chenglong Liu, Haoran Wei, Jianjian Sun, Zheng Ge, and Xiangyu Zhang. Perpo: Perceptual preference optimization via discriminative rewarding. arXiv preprint arXiv:2502.04371, 2025." + ], + "bbox": [ + 173, + 90, + 826, + 907 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A Appendix", + "text_level": 1, + "bbox": [ + 171, + 90, + 292, + 107 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this appendix, we provide additional details about Perception-R1, which are omitted due to the 9-page limit of the main paper. Specifically, Section A.1 elaborates on the detailed dataset and training settings. Section A.2 presents more experimental results.", + "bbox": [ + 169, + 119, + 823, + 162 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.1 Additional Details about Experimental Setting", + "text_level": 1, + "bbox": [ + 171, + 179, + 540, + 194 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "More detailed dataset information of Perception-R1. In Section 4.3, we introduced what data was used for RL post-training of Perception-R1 on which tasks. In this part, we will provide more detailed information about the datasets, as shown in Table 7.", + "bbox": [ + 169, + 204, + 823, + 246 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/98895dacdebc846941fa4240b973e0e77e41234c273900b597e8f824a43d6b97.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
tasksdatasetsOriginalUsedRatio
visual groundingRefCOCO / RefCOCO+ / RefCOCOg320k5k1.56%
OCRPageOCR50k5k10%
visual countingPixMo-Count1.9M10k0.5%
object detectionCOCO2017110k110k100%
overall-2.38M130k-
", + "bbox": [ + 243, + 257, + 750, + 344 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "More detailed training setting information of Perception-R1. Section 4.3 elaborates on several key parameters of Perception-R1. In this part, we further demonstrate the diverse prompts employed for distinct perception tasks, as shown in Table 8.", + "bbox": [ + 169, + 393, + 823, + 436 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/60e4f5a6e447288126a3a996a74edce8999b36c1645b0807a13af873d56e9b91.jpg", + "table_caption": [ + "Table 7: Training dataset statistics. Notably, we do not mix the data from different perception tasks for joint training because the rewards for different tasks vary." + ], + "table_footnote": [], + "table_body": "
taskssystem promptuser prompt
visual groundingQwen2-VLOutput the bounding box of the {question} in the image.
OCRQwen2-VLOCR this image.
visual countingQwen2-VLOutput all the bounding boxes of the {label}
object detectionQwen2.5-VLPlease output bbox coordinates and names of {90 categories of COCO}.
", + "bbox": [ + 173, + 449, + 823, + 523 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 8: Prompts of Perception-R1. The system prompt of Perception-R1 follows Qwen2-VL [61] and Qwen2.5-VL [3].", + "bbox": [ + 169, + 527, + 823, + 559 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.2 Additional Experimental Results", + "text_level": 1, + "bbox": [ + 171, + 580, + 444, + 597 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this section, we provide more qualitative analysis of Perception-R1 on multiple visual perception tasks. The selected cases are shown in Figure 3-6.", + "bbox": [ + 169, + 607, + 823, + 635 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/0462c8ec286d5c7d395e7cbc94895fcdc812f7bea13f6f8a53be3af0b8a4702d.jpg", + "image_caption": [ + "Figure 3: Demo case of Percpetion-R1 on visual counting task." + ], + "image_footnote": [], + "bbox": [ + 209, + 650, + 787, + 854 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/66e3dc80cff0f2751ef3431afc35a24985f5ee06c3e675f05e4e3abbf56e3f18.jpg", + "image_caption": [ + "Input:" + ], + "image_footnote": [], + "bbox": [ + 243, + 157, + 308, + 166 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/17ad0191a60a3dd9a54083a097320ffdc99ee524bda739999b0c69e290bafa00.jpg", + "image_caption": [ + "Figure 4: Demo case of Percpetion-R1 on OCR task." + ], + "image_footnote": [], + "bbox": [ + 339, + 167, + 503, + 243 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Riding Dirty", + "text_level": 1, + "bbox": [ + 243, + 181, + 300, + 205 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A muddy mix of road \nThe wind is a bit \nCyclocross doubles the \nthrill of both sports. 
Here's \nthe gear to get you started.", + "bbox": [ + 243, + 208, + 310, + 231 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "by BERNSTEIN 100VY", + "bbox": [ + 245, + 234, + 287, + 239 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Cyclosis is a cool term. It means to drop-hotelize bikes with little or no need to walk. It is a course that often includes a variety of exercises, such as as well as obstacles that force you to get your legs on the ground. A bike over your shoulder. \"All you need is a bike and have a good attitude and confidence in your ability to walk,\" says Stu Thorne, founder and CEO of the professional cyclosis team.", + "bbox": [ + 243, + 243, + 302, + 297 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "BEST PCHENTY LEVEL", + "bbox": [ + 349, + 250, + 411, + 256 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "What you canificn with extra weight you make up for with a highly capable automobile frame that you'll want to keep rolling between trips. A carbon-bond tire helps eat and move the vehicle. The car also has a high quality底盘, an excellent底盘, and a 100%底盘 driven power through any grade. And TBP cable disc brakes perform well even when the vehicle is on the road. The steering system is also very useful to consider if you're looking for a bike that can travel all way.", + "bbox": [ + 352, + 257, + 493, + 279 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/89a957bd1bb76655d3273172dac178614e0fa382484fd578c9fe8b83a2e59d2c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 243, + 284, + 426, + 359 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/579e0f2a185ee16ddbe4e2166f6260c9afac14b0b8d60d1d2a4c35e5ee16becc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 429, + 284, + 496, + 306 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A premium neck bike, this should be your next choice. 
The Carbone Factory offers a 2000 inch, 16-in. front bottom bracket and relatively easy to install. It's also suitable for hard, solid, hard or soft through cracks. This is the best way to get one of these items. They can be run tubes to better make it easier to use. They mean they spring freely when called on to do so. They are lightweight and fast- and something you won't have to deal with in special area $3,000", + "bbox": [ + 429, + 306, + 496, + 358 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/228803a2141e32fd5ffd82f71c5eb34605ec16fae536c99ed74406455884046c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 243, + 368, + 290, + 402 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Craft Shield Glove \nGlove \ngloves \ngloves from sailor \ngloves from sailor \ngloves from sea \ngloves from sea \ngloves from sea \ngloves from sea \ngloves from sea \ngloves from sea", + "bbox": [ + 243, + 405, + 285, + 441 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/7c25e4962ffe267435cfb24669bd2212ff8b74cf4982030382e10e32dcb6c53a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 299, + 368, + 339, + 402 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Darn Tough Microer Crews can be a great way to complete your cyclical routines you know. The best way to get the best, fight, strong and cool are to do so with the \"power of the soul.\"", + "bbox": [ + 297, + 405, + 336, + 441 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/0e15e41f95fe19e01d7b93c2eae7c047b8338378b3a8e6dcb098bdad66bfe059.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 369, + 395, + 404 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Park Tool Rapho Brush Set Leng W 10mm 2. This tool usually dirts the skin and acts as a light source for key components. This kit the teeth are used to clean your bony look cheek off. 
You would need to purchase part.com e600 regal.co.uk", + "bbox": [ + 346, + 405, + 418, + 441 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/cf3f379731aab4c9a3ee26872f83496ba2430ebac37aa5b35a001d16ebb1bfbc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 398, + 368, + 439, + 404 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Rapha Arm and Leg Warmers \nA has a lot of Lycia for the warm season. \nThe warm weather up, etc. \nThe warm air is coming from the warm air. \nLycia from E70", + "bbox": [ + 398, + 405, + 444, + 441 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/01d85e92cf288989a366cb6be307e072ac35ef3cfdd208c1716ed73746c60ccc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 450, + 368, + 496, + 404 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Topeak SmartGaUGE D2 1000000000000000000000000000000000000000000000000000000000000000", + "bbox": [ + 450, + 405, + 496, + 441 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "MEN'S JOURNAL 94 NOVEMBER 2016", + "bbox": [ + 331, + 444, + 411, + 449 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Question:", + "text_level": 1, + "bbox": [ + 181, + 460, + 238, + 470 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "OCR this image.", + "bbox": [ + 230, + 476, + 369, + 488 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Perception-R1:", + "text_level": 1, + "bbox": [ + 181, + 500, + 269, + 511 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": " G E A R L A B\\nRiding Dirty\\n\\nA muddy mix of road- and mountain-bike racing, cyclocross doubles the thrill of both sports. 
Here's the gear to get you started.\\n\\nBryne Bredy\\n\\nCyclocross is as cool as it looks: Racers pedal drop-handlebikes with trail-ready tires through a course that often includes pavement, dirt, mud, and grass, as well as obstacles that force you to dismount and sling your bike over your shoulder. \"All you really need is to be tough and have a good attitude and a desire to suffer a little bit,\" says Stu Thorne, founder and director of the Cannondale professional cyclocross team. The right gear helps, too.\\nBest for Entry Level\\n\\nCAADX 105\\n\\nWhat you sacrifice with extra weight you make up for with a highly capable aluminum frame that'll keep you going between races. A carbon-blade fork helps eat up some of the pounding the courses can dish out. The dependable 2x11 speed Shimano 105 drivetrain powers through any grade, and TRP cable disc brakes perform well even in mud. Meanwhile, the 35cc tires and stable geometry make the CAADX a superfun commuter if you're looking for a bike that can multitask\\n\\nCannondale.com $\\mathbb{S}1,970\\backslash \\mathbb{n}\\backslash \\mathbb{n}$ Best for Experienced Cyclists\\n\\nCruX Elite X1\\n\\nIf you already log long miles on a premium road bike, this should be your new cross rig. The carbon frame is as light as it is.\\n\\nLow bottom bracket and relatively slack head angle, which make it stable, helped us sail through rocks and roots and corner quickly. The tires can be run tubeless to better resist flats, and extra clearance means they spin freely when caked with mud. The CruX Elite is playful and fast — and something you won't outgrow as you collect medals.\\n\\nGlove\\n\\nThe cross season typically runs from September through February, so you'll need hearty gloves like these, with a fleece lining and a waterproof base, for warmth on wet race days. 
craftsports.us $\\mathbb{S}78\\backslash \\mathbb{n}\\backslash \\mathbb{N}$ Darn Tough Micro Crew\\n\\nUnlike other bike races, cyclocross requires you to be on foot at times. So light, strong socks are key. These aren't likely to wear out, but Darn Tough will replace them if they do. darntough.com $\\mathbb{S}18\\backslash \\mathbb{n}\\backslash \\mathbb{N}$ Park Tool Brush Set\\n\\nThe mud, dirt, and grime that builds up during off-road rides can damage key components. This kit does more than just keep your bike looking fresh; it keeps it healthy, too. parktool.com $\\mathbb{S}80\\backslash \\mathbb{n}\\backslash \\mathbb{n}$ Rapha Arm and Leg Warmer\\n\\nThese merino layers, which have a bit of Lycra for stretch, peel off easily when the weather warms up. And they dry quickly, whether you sweat profusely or get caught in a sudden squall. rapha.cc From $\\mathbb{S}70\\backslash \\mathbb{n}\\backslash \\mathbb{N}$ Topeak SmartGauge D2\\n\\nFor peak performance, adjust your tire pressure to suit the terrain. (On soft trails, lower pressure makes it grip better.) The SmartGauge makes it a snap with a fast, readable result. topeak.com $\\$ 40$", + "bbox": [ + 222, + 518, + 815, + 796 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/dbb8a8c2c2bf6102daf300bc5ccdebd12c743a5f0b46758ef0732eed7771661c.jpg", + "image_caption": [ + "Input:" + ], + "image_footnote": [], + "bbox": [ + 248, + 155, + 488, + 281 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/cd6340a67f4aa70711d979225ba22f60251b0956c62633a3ebc28b4e3e8649cf.jpg", + "image_caption": [ + "Output:", + "Figure 5: Demo case of Percpetion-R1 on visual grounding task." 
+ ], + "image_footnote": [], + "bbox": [ + 540, + 155, + 782, + 280 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Question:", + "text_level": 1, + "bbox": [ + 210, + 285, + 261, + 296 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Output the bounding box of the woman holding a child in the image.", + "bbox": [ + 254, + 299, + 624, + 310 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Perception-R1:", + "text_level": 1, + "bbox": [ + 210, + 320, + 287, + 330 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The woman holding a child(157,459), (208,622)", + "bbox": [ + 254, + 334, + 483, + 345 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/e711809a55693e7d0cf74a6a3facccda9fe576ff5667b5ae1184515cb89ee65d.jpg", + "image_caption": [ + "Input:", + "Figure 6: Demo case of Percpception-R1 on general object detection task. The color of bounding boxes correspond to the category." + ], + "image_footnote": [], + "bbox": [ + 250, + 512, + 486, + 636 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/10d999c72f599129e248ca707edf35cc4f10c71f663e2cb1c44587b8cc6aec3a.jpg", + "image_caption": [ + "Output:" + ], + "image_footnote": [], + "bbox": [ + 539, + 513, + 776, + 635 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Question:", + "text_level": 1, + "bbox": [ + 210, + 643, + 261, + 654 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Please output bbox coordinates and names of {90 categories of COCO}.", + "bbox": [ + 254, + 657, + 638, + 669 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Perception-R1:", + "text_level": 1, + "bbox": [ + 212, + 678, + 289, + 688 + ], + "page_idx": 16 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "[ {\"bbox_2d\": [435, 1, 616, 415], \"label\": \"person\"}, {\"bbox_2d\": [218, 112, 323, 253], \"label\": \"person\"}, {\"bbox_2d\": [147, 165, 225, 280], \"label\": \"person\"}, 
{\"bbox_2d\": [183, 126, 239, 242], \"label\": \"person\"}, {\"bbox_2d\": [276, 77, 338, 192], \"label\": \"person\"}, {\"bbox_2d\": [127, 54, 471, 406], \"label\": \"laptop\"}, {\"bbox_2d\": [108, 174, 156, 243], \"label\": \"cell phone\"}, {\"bbox_2d\": [26, 107, 103, 214], \"label\": \"potted plant\"}, {\"bbox_2d\": [64, 29, 280, 137], \"label\": \"chair\"} ]", + "guess_lang": "json", + "bbox": [ + 256, + 690, + 496, + 795 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_model.json b/data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_model.json new file mode 100644 index 0000000000000000000000000000000000000000..70a0f420e9d22a2ee135f10903d1a52d6d7fd3d5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_model.json @@ -0,0 +1,2984 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.706 + ], + "angle": 270, + "content": "arXiv:2504.07954v1 [cs.CV] 10 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.2, + 0.123, + 0.8, + 0.174 + ], + "angle": 0, + "content": "Perception-R1: Pioneering Perception Policy with Reinforcement Learning" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.224, + 0.817, + 0.342 + ], + "angle": 0, + "content": "En Yu\\(^{1,\\mathbb{I}}\\), Kangheng Lin\\(^{2,\\mathbb{I}}\\), Liang Zhao\\(^{3,\\mathbb{I}}\\), Jisheng Yin\\(^{3}\\), Yana Wei\\(^{4}\\), Yuang Peng\\(^{5}\\), Haoran Wei\\(^{3}\\), Jianjian Sun\\(^{3}\\), Chunrui Han\\(^{3}\\), Zheng Ge\\(^{3}\\), Xiangyu Zhang\\(^{3}\\), Daxin Jiang\\(^{3}\\), Jingyu Wang\\(^{2}\\), Wenbing Tao\\(^{1\\dagger}\\) \n\\(^{1}\\)Huazhong University of Science and Technology \n\\(^{2}\\)Beijing University of Posts and Telecommunications \n\\(^{3}\\)StepFun \n\\(^{4}\\)Johns Hopkins University \n\\({}^{5}\\)Tingshua 
University \n{yuen, wenbingtao}@hust.edu.cn \nhttps://github.com/linkangheng/PR1" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.377, + 0.538, + 0.392 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.409, + 0.768, + 0.603 + ], + "angle": 0, + "content": "Inspired by the success of DeepSeek-R1, we explore the potential of rule-based reinforcement learning (RL) in MLLM post-training for perception policy learning. While promising, our initial experiments reveal that incorporating a thinking process through RL does not consistently lead to performance gains across all visual perception tasks. This leads us to delve into the essential role of RL in the context of visual perception. In this work, we return to the fundamentals and explore the effects of RL on different perception tasks. We observe that the perceptual perplexity is a major factor in determining the effectiveness of RL. We also observe that reward design plays a crucial role in further approaching the upper limit of model perception. To leverage these findings, we propose Perceptron-R1, a scalable RL framework using GRPO during MLLM post-training. With a standard Qwen2-VL-2B-Instruct, Perception-R1 achieves \\(+4.2\\%\\) on RefCOCO+, \\(+17.9\\%\\) on PixMo-Count, \\(+4.2\\%\\) on PageOCR, and notably, \\(31.9\\%\\) AP on COCO2017 val1 for the first time, establishing a strong baseline for perception policy learning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.629, + 0.312, + 0.644 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.664, + 0.687, + 0.677 + ], + "angle": 0, + "content": "\"We do not see the world as it is, but as we are — or as we are conditioned to see it.\"" + }, + { + "type": "text", + "bbox": [ + 0.701, + 0.686, + 0.808, + 0.699 + ], + "angle": 0, + "content": "Stephen R. 
Covey" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.715, + 0.825, + 0.827 + ], + "angle": 0, + "content": "The landscape of large language model (LLM) has undergone a paradigm shift from non-reasoning foundation model, e.g., GPT-4/4o [44, 19], DeepSeek-V3 [33], to strongly reasoning model, e.g., OpenAI o1/o3 [45], DeepSeek-R1 [12], and Kimi-1.5 [57]. DeepSeek-R1, in particular, introduced a simple yet effective rule-based reinforcement learning (RL) approach [55], enabling emergent reasoning patterns without relying on traditional scaffolding techniques such as Monte Carlo Tree Search (MCTS) [17, 67] or Process Reward Models (PRM) [31]. This has catalyzed a new revolution in LLM post-training techniques, prompting researchers to develop more powerful reasoning language models [42, 24]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.876 + ], + "angle": 0, + "content": "Despite these advancements, current explorations predominantly focus on the purely linguistic domain, and the unimodal nature of these reasoning models limits their ability to engage with the world in a truly perceptive way. To bridge this gap, this work takes a pioneering step in exploring" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.885, + 0.451, + 0.9 + ], + "angle": 0, + "content": "†Corresponding author, † Core contribution" + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.315, + 0.937 + ], + "angle": 0, + "content": "Preprint. Under review." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.825, + 0.162 + ], + "angle": 0, + "content": "the potential of perception policy learning within multimodal LLMs [61, 3] from lens of RL. While transferring RL techniques with reasoning processes, i.e., chain-of-thought [66], from the language domain shows promise on certain visual tasks, our empirical studies reveal that this approach is not universally effective. 
This inevitably prompts us to reexamine the role that RL play in visual perception tasks, and how the utilization of RL can lead to better and scalable perception policy." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.168, + 0.825, + 0.238 + ], + "angle": 0, + "content": "The current understanding of RL as a post-training technique is primarily grounded in purely linguistic tasks [24] and language-centric multimodal tasks [10]. However, the characteristics of visual perception tasks are fundamentally distinct from those of natural language, necessitating a revised understanding of RL in the context of visual perception. Specifically, visual perception possesses two unique properties, as follows:" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.243, + 0.825, + 0.272 + ], + "angle": 0, + "content": "- Visual perception is embodied in the objective physical world. It possesses definite physical truth values, e.g., points, lines, or bounding boxes, but it lacks semantics compared to language." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.277, + 0.825, + 0.305 + ], + "angle": 0, + "content": "- Visual perception, e.g., visual grounding and counting, are mostly \"single-step\" direct predictions. It lacks structured reasoning search space for RL exploration." + }, + { + "type": "list", + "bbox": [ + 0.169, + 0.243, + 0.825, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.312, + 0.825, + 0.381 + ], + "angle": 0, + "content": "These two characteristics determine that the application of RL to visual perception will have different properties from pure language [24] and language-centric multimodal [39, 41] approaches. In this work, we delve into the RL post-training of MLLM in the domain of visual perception, and further complements and extends the above understanding. Through extensive experimental analysis, we have uncovered several bitter yet valuable findings." 
+ }, + { + "type": "text", + "bbox": [ + 0.169, + 0.387, + 0.825, + 0.416 + ], + "angle": 0, + "content": "- Explicit thinking process (CoT) during RL is not necessary for current perception policy. (§ 5.2) We observe that the model without thinking process performs better than the one with thinking process." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.42, + 0.825, + 0.449 + ], + "angle": 0, + "content": "- Reward design plays a pivotal role in perception policy learning. (§ 5.3) An appropriate reward function will lead to a healthier learning curve and explore stronger perceptual patterns of MLLM." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.452, + 0.825, + 0.481 + ], + "angle": 0, + "content": "- Perceptual perplexity determines RL superiority over SFT. (§ 5.2) We observe that RL can bring more significant improvement compared to SFT on more complex visual tasks, e.g., object detection." + }, + { + "type": "list", + "bbox": [ + 0.169, + 0.387, + 0.825, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.494, + 0.827, + 0.633 + ], + "angle": 0, + "content": "Driven by these findings, we present a simple, effective, and scalable RL framework, i.e., Perception-R1, for efficient perception policy learning. Inspired by mainstream language reasoning models [12, 57], Perception-R1 applies rule-based RL algorithm GRPO [55] during MLLM post-training stage. With a vanilla Qwen2-VL-2B-Instruct [61], Perception-R1 achieves significant improvement on multiple visual perception benchmarks, e.g., \\(+4.2\\%\\) on RefCOCO+ [40], \\(+17.9\\%\\) on PixMoCount [13], and \\(+4.2\\%\\) F1-score on PageOCR [34]. More importantly, Perception-R1 serves as the first time to enable a pure MLLM to reach \\(31.9\\%\\) mAP on the object detection benchmark COCO2017 [32] va1, showcasing the great potential of general foundation models to surpass expert models in mainstream visual tasks. 
We hope our method, results, and analysis will inspire future research on perception policy learning with RL." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.652, + 0.331, + 0.669 + ], + "angle": 0, + "content": "2 Related Works" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.683, + 0.827, + 0.837 + ], + "angle": 0, + "content": "Multimodal Foundation and Reasoning Models. Recently, vision-language models [37, 3, 73, 70] have demonstrated remarkable capabilities in visual comprehension [64, 68] and generation [14, 48] through large-scale pretraining [2, 61] and visual instruction tuning [37, 35]. These models integrate visual modalities into a unified semantic space via visual encoders [49] and adapters [11, 37], while leveraging auto-regressive large language models [59, 1] as decoders for output generation. Despite the advancements in multimodal foundation models, their visual reasoning capabilities remain in an early developmental stage. Recent approaches [8, 39, 41] have explored reinforcement learning (RL) post-training to enhance visual reasoning. However, they primarily focus on language-centric tasks such as ambiguous reference resolution [39] and geometric problem-solving [41], while overlooking critical aspects of perception-driven reasoning. In this work, we take a pioneering step in utilizing RL for perception policy learning, aiming to bridge this gap and advance multimodal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.842, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Visual Perception in Multimodal Models. Visual Perception, as a concept in the field of computer vision [21, 52, 20, 69, 29], refers to the process of interpreting and understanding sensory, i.e., vision, information from the real-word. In the context of multimodal LLMs (MLLM), visual perception plays a crucial role in enabling the models to integrate, comprehend and reason visual information from the image or video. 
Existing MLLM generally enhance their visual perception capabilities by" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "designing more advanced visual perception architectures [63, 64], more suitable visual-language modeling strategies [70, 68], and more sophisticated post-training techniques [74]. This work aims to explore the potential of further enhancing visual perception from the perspective of RL." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.281 + ], + "angle": 0, + "content": "RL-based Post-training in LLMs and MLLMs. Reinforcement learning (RL) has emerged as a pivotal paradigm for refining LLMs through alignment with human preferences and task-specific objectives. Prominent approaches like Reinforcement Learning from Human Feedback (RLHF) [46] and Direct Preference Optimization (DPO) [50] have demonstrated remarkable success in enhancing safety, coherence, and instruction-following capabilities of LLMs [43, 47, 44] and MLLMs [74, 60]. Recently, rule-based RL techniques, represented by GRPO [55], have demonstrated the potential for large-scale RL applications. LLMs have officially entered the era of strongly reasoning models. Subsequently, MLLMs [8, 39, 41] have also quickly followed this technology. However, so far, there has been no exciting, true \"Aha Moment\" in the multimodal domain. This study aims to investigate the potential contributions of RL to multimodal models, focusing on visual perception." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.3, + 0.32, + 0.315 + ], + "angle": 0, + "content": "3 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.332, + 0.825, + 0.417 + ], + "angle": 0, + "content": "Perception Policy Definition. 
The goal of perception policy in visual-language context is enabling the model to first \\((i)\\) extract and understand visual information from the environment [37, 68], then \\((ii)\\) perform logical reasoning based on this understanding [73, 70] to \\((iii)\\) accomplish specific tasks and further interact with the environment [5, 22]. In this work, we aim to empower the model to deal with a series of pure visual, e.g., counting, detection, and visual-language, e.g., grounding, optical character recognition (OCR), tasks through perception policy learning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.422, + 0.826, + 0.506 + ], + "angle": 0, + "content": "Group Relative Policy Optimization (GRPO [55]) is a rule-based reinforcement learning algorithm tailored for post-training LLMs. Its core idea is to use group relative rewards to optimize the policy, eliminating the need for a separate critic model [54]. Specifically, GRPO samples multiple outputs \\((\\mathbf{o}_1 \\sim \\mathbf{o}_{\\mathbf{g}}\\) in Figure 1) from the old policy for the same input, calculates the average reward of these outputs as the baseline, and uses the relative rewards to guide policy updates. 
The optimization objective of GRPO can be formulated as following:" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.536, + 0.452, + 0.554 + ], + "angle": 0, + "content": "\\[\n\\mathcal {J} _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {[ q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\text {o l d}}} (O | q) ]}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.558, + 0.811, + 0.6 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\sum_ {t = 1} ^ {| o _ {i} |} \\left\\{\\min \\left[ \\frac {\\pi_ {\\theta} ^ {i , t}}{\\pi_ {\\theta_ {\\mathrm {o l d}}} ^ {i , t}} \\hat {A} _ {i, t}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} ^ {i , t}}{\\pi_ {\\theta_ {\\mathrm {o l d}}} ^ {i , t}}, 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i, t} \\right] - \\beta \\mathbb {D} _ {\\mathrm {K L}} [ \\pi_ {\\theta} \\| \\pi_ {\\mathrm {r e f}} ] \\right\\},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.289, + 0.621, + 0.825, + 0.653 + ], + "angle": 0, + "content": "\\[\n\\mathbb {D} _ {\\mathrm {K L}} \\left[ \\pi_ {\\theta} \\| \\pi_ {\\text {r e f}} \\right] = \\frac {\\pi_ {\\text {r e f}} \\left(o _ {i , t} | q , o _ {i , < t}\\right)}{\\pi_ {\\theta} \\left(o _ {i , t} | q , o _ {i , < t}\\right)} - \\log \\frac {\\pi_ {\\text {r e f}} \\left(o _ {i , t} | q , o _ {i , < t}\\right)}{\\pi_ {\\theta} \\left(o _ {i , t} | q , o _ {i , < t}\\right)} - 1, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.667, + 0.827, + 0.698 + ], + "angle": 0, + "content": "where \\(\\epsilon\\) and \\(\\beta\\) are hyper-parameters, and \\(\\hat{A}_{i,t}\\) is the advantage, computed using a group of rewards \\(\\{r_1,r_2,\\dots ,r_G\\}\\) corresponding to the outputs within each group. Refer to [12, 55] for more details." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.717, + 0.325, + 0.734 + ], + "angle": 0, + "content": "4 Perception-R1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.749, + 0.825, + 0.793 + ], + "angle": 0, + "content": "In a nutshell, our Perception-R1 applies the rule-based RL algorithm GRPO [55] to the post-training stage of MLLM and optimizes the reward modeling to support perception policy learning. Figure 1 illustrates the idea, more approach and implementation details introduced next." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.809, + 0.422, + 0.825 + ], + "angle": 0, + "content": "4.1 Rule-based Reward Modeling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.836, + 0.827, + 0.879 + ], + "angle": 0, + "content": "The reward function serves as the principal training signal in reinforcement learning (RL), directing the optimization process. Existing LLM methods [12, 57, 24] basically apply a highly resilient, rule-based reward system consisting of only two reward types: Format Reward and Answer Reward." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Format Reward. In existing LLM and MLLM, the output format is comprised of two essential components: the final output format and the intermediate reasoning process format. The reward for" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.195, + 0.087, + 0.805, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.271, + 0.828, + 0.302 + ], + "angle": 0, + "content": "Figure 1: Illustration of Perception-R1 framework. Following DeepSeek-R1 [12], we prompt MLLM model to generate several rollout responses and apply GRPO [55] during post-training stage." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.327, + 0.828, + 0.37 + ], + "angle": 0, + "content": "the final output is defined in accordance with specific task requirements and is typically encapsulated within `<answer>` tags, whereas the reward for the intermediate reasoning process generally mandates that the reasoning steps be enclosed within `<think>` tags. Formally," + }, + { + "type": "equation", + "bbox": [ + 0.369, + 0.395, + 0.826, + 0.429 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {format}} = \\left\\{ \\begin{array}{l l} 1, & \\text {if format is correct} \\\\ - 1, & \\text {if format is incorrect} \\end{array} \\right. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.442, + 0.827, + 0.485 + ], + "angle": 0, + "content": "In Perception-R1, we follow this setting. A subtle difference emerges in that visual perception tasks frequently require the output of object coordinates, e.g., bounding box, lines, or points. Consequently, the output format must be strictly constrained to the \\([x1, y1, x2, y2]\\) structure." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.491, + 0.826, + 0.561 + ], + "angle": 0, + "content": "Answer Reward. The Answer Reward pertains to the correctness of model-generated responses, serving as a central consideration in reward design. Typically, outputs from language models are abstract and semantically rich, requiring validation through external mechanisms such as code-based ADE [12] or mathematical answer verification [55]. In contrast, visual perception tasks benefit from clearly defined physical ground truths, which simplify the development of a robust reward function." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.566, + 0.829, + 0.623 + ], + "angle": 0, + "content": "Perception-R1 diverges from LLM approaches by anchoring the reward mechanism in visual discrimination. 
This departure is pivotal, as it replaces the often implicit and subjective feedback mechanisms typical of language models with an explicit, quantifiable metric. Formally, discriminative reward \\( r_i \\) can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.449, + 0.652, + 0.826, + 0.668 + ], + "angle": 0, + "content": "\\[\nr _ {i} = \\Phi \\left(o _ {i}, z\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.683, + 0.826, + 0.726 + ], + "angle": 0, + "content": "where \\(\\Phi(\\cdot)\\) indicates the discriminative function, for example, IoU for bounding box and euclidean distance for point. By leveraging visual discrimination, we provide the model with a clear and objective feedback signal, ensuring the model's policy update with precise measured margin." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.741, + 0.442, + 0.758 + ], + "angle": 0, + "content": "4.2 Multi-Subject Reward Matching" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.767, + 0.827, + 0.852 + ], + "angle": 0, + "content": "In natural environments, physical objects rarely appear in isolation and instead frequently co-occur in groups. This inherent complexity gives rise to a challenge we define as reward matching, which entails aligning the model's output with the corresponding ground truth before reward computation. Specifically, when prompting the model to predict the attributes of multiple subjects within an image, e.g., points and bounding box, it is necessary to determine the appropriate ground truth reference for each subject to ensure accurate reward assignment." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.826, + 0.914 + ], + "angle": 0, + "content": "Formally, let \\( y = \\{y_{i}\\}_{i=1}^{N} \\) denote the set of predicted attributes for \\( N \\) subjects, and let \\( z = \\{z_{j}\\}_{j=1}^{M} \\) represent the corresponding ground truth attributes. 
We model the reward matching problem as a bipartite graph matching task, where one set of nodes corresponds to predictions and the other to ground truths. The edge weight between a prediction \\( y_{i} \\) and a ground truth \\( z_{j} \\) is determined by the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "reward function \\(\\Phi(y_i, z_j)\\) defined in Eq. 3, which measures their similarity or compatibility. The objective is to find the optimal assignment that maximizes the total reward:" + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.143, + 0.826, + 0.185 + ], + "angle": 0, + "content": "\\[\n\\hat {\\sigma} = \\underset {\\sigma \\in \\Omega_ {N}} {\\arg \\max } \\sum_ {i = 1} ^ {N} \\Phi (y _ {i}, z _ {\\sigma (i)}), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.193, + 0.825, + 0.267 + ], + "angle": 0, + "content": "where \\(\\Omega_N\\) is the set of all valid assignments between predictions and ground truths. To solve this optimization problem efficiently, we employ the Hungarian algorithm [27], a well-established method for bipartite graph matching that guarantees the optimal pairing by maximizing the overall reward (or equivalently, minimizing the cost). This ensures that each predicted attribute is accurately matched with its corresponding ground truth, thereby optimizing the reward computation process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.27, + 0.825, + 0.298 + ], + "angle": 0, + "content": "After the optimal reward assignment is determined, we calculate the answer reward by aggregating the individual rewards for each subject. 
Mathematically, the overall reward score is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.394, + 0.32, + 0.826, + 0.361 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {answer}} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\Phi (y _ {i}, z _ {\\hat {\\sigma} (i)}), \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.363, + 0.558, + 0.38 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {total}} = S _ {\\text {format}} + S _ {\\text {answer}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.825, + 0.434 + ], + "angle": 0, + "content": "where \\(\\hat{\\sigma}\\) is the optimal assignment obtained via the Hungarian algorithm. In Perception-R1, we primarily use reward matching for visual counting and object detection tasks, as these involve multiple objects." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.447, + 0.416, + 0.463 + ], + "angle": 0, + "content": "4.3 Perception-R1 Configuration" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.473, + 0.826, + 0.53 + ], + "angle": 0, + "content": "Model Setting. Our model implementation follows Qwen2-VL [61]. We mainly use the Qwen2-VL-Instruct-2B as the baseline model. We also utilize Qwen2.5-VL-3B-Instruct [3] for training object detection tasks, due to its specialized optimization for localizing bounding boxes. The input image resolution for Qwen2-VL is dynamic, cooperating with 2D-RoPE [56]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.535, + 0.827, + 0.62 + ], + "angle": 0, + "content": "Task and Data Setting. Given that Perception-R1 is primarily oriented towards pure visual and visual-language tasks, we select several mainstream and representative downstream tasks for perception policy learning, specifically including visual grounding, e.g., refCOCO [71] / + [71] / g [40], OCR, i.e., PageOCR [34], visual counting, i.e., Pixmo-Count [13], and object detection, i.e., COCO2017 [32]. 
For each task, a subset \\((5k\\sim 10k)\\) of samples are respectively extracted as base data for individual post-training. More details are in appendix A.1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.624, + 0.825, + 0.681 + ], + "angle": 0, + "content": "Training Setting. We focus on the RL-based post-training stage of MLLM. All the selected base models have already undergone pre-training and SFT stage. During RL stage, the initial learning rate is set as \\(1e - 6\\) with 8 rollouts by default and a batch size of 1. The following are some important hyper-parameters during post-training. Prompts detailed settings are in the appendix A.1." + }, + { + "type": "table", + "bbox": [ + 0.183, + 0.688, + 0.816, + 0.717 + ], + "angle": 0, + "content": "
Gradient AccumulationRollout GKL CoefficientMax Response LenTemperature
280.0420481.0
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.725, + 0.827, + 0.824 + ], + "angle": 0, + "content": "Reward Setting. We tailor distinct discriminative rewards for various visual perception tasks. For the grounding task, the reward is based on the Intersection over Union (IoU) between the predicted output and the ground truth. In the counting task, we adopt a paradigm similar to Qwen2.5-VL, which first detects points and then counts them. Here, the reward is derived from the Euclidean distance computed during reward matching. For OCR, the edit distance serves as the primary reward metric. Lastly, in object detection, we combine multiple rewards: an object number reward based on the F1 score, a location reward using IoU, and a binary classification reward with a missing penalty." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Sampling Setting. Following Kimi-1.5 [57], we adopt a curriculum sampling strategy that begins with easier data and gradually transitions to more challenging examples. Specifically, for the object detection task, we first conduct offline training on the COCO dataset to compute reward values. Based on the selected rewards, i.e., number reward, we partition the dataset accordingly. As training advances, we progressively replace the data with more difficult samples (i.e., those associated with lower rewards) while concurrently increasing the rollout to broaden the model's exploration space." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.179, + 0.088, + 0.816, + 0.485 + ], + "angle": 0, + "content": "
methodsizeRefCOCO
val@50testA@50testB@50val@75testA@75testB@75val@95testA@95testB@95valAvgtestAAvg
MDETR [25]-87.590.482.6--------
OFA [62]-88.490.683.3--------
LLaVA-1.5 [35]7B49.154.943.310.713.66.90.40.30.320.122.9
LLaVA-NeXT [36]7B82.588.474.045.754.835.61.92.60.743.448.6
LLaVA-OV [28]7B73.082.363.524.229.615.90.50.50.532.637.5
Qwen2-VL [61]2B86.889.682.077.280.670.133.035.726.965.768.6
Perception-R12B89.191.484.579.583.672.435.038.528.867.971.2
RefCOCO+
methodsizeval@50testA@50testB@50val@75testA@75testB@75val@95testA@95testB@95valAvgtestAAvg
MDETR [25]-81.185.572.9--------
OFA [62]-81.387.174.2--------
LLaVA-1.5 [35]7B42.449.736.49.812.46.40.50.50.217.620.8
LLaVA-NeXT [36]7B74.584.064.741.551.830.01.92.71.039.346.2
LLaVA-OV [28]7B65.879.057.223.628.815.30.60.60.430.036.1
Qwen2-VL [61]2B77.182.570.168.773.860.029.432.323.058.462.9
Perception-R12B81.786.874.373.679.364.232.636.926.762.667.7
RefCOCOg
methodsizeval@50test@50val@75test@75val@95test@95valAvgtestAvg
MDETR [25]-83.383.3------
OFA [62]-82.282.3------
LLaVA-1.5 [35]7B43.245.18.59.30.30.317.318.2
LLaVA-NeXT [36]7B77.577.140.739.91.81.740.039.6
LLaVA-OV [28]7B70.870.823.323.60.60.731.631.7
Qwen2-VL [61]2B83.383.172.773.028.927.961.661.3
Perception-R12B85.785.475.776.032.133.164.564.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.488, + 0.825, + 0.53 + ], + "angle": 0, + "content": "Table 1: Visual grounding benchmark evaluation. To comprehensively assess the model's grounding capability, we select referring expression comprehension (REC) benchmark, i.e., RefCOCO [71], RefCOCO+[71], and RefCOCOg[40] for evaluation. The expert model is denoted in gray." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.538, + 0.821, + 0.651 + ], + "angle": 0, + "content": "
sizeEdit Distance ↓F1-score ↑Precision ↑Recall ↑BLEU ↑METEOR ↑
enzhenzhenzhenzhenzhenzh
Nougat [4]250M25.5-74.5-72.0-80.9-66.5-76.1-
DocOwl1.5 [23]7B25.8-86.2-83.5-96.2-78.8-85.8-
GOT [65]580M3.53.897.298.097.198.297.397.894.787.895.893.9
Qwen2-VL [61]2B8.010.094.493.096.996.193.090.590.978.094.187.2
LLaVA-NeXT [36]7B43.0-64.7-57.3-88.1-47.8-58.2-
Perception-R12B3.59.098.294.498.696.397.892.796.774.698.188.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.656, + 0.825, + 0.684 + ], + "angle": 0, + "content": "Table 2: PageOCR evaluation, compared with various strong expert and general models. \"en\" means English and \"zh\" means Chinese." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.693, + 0.314, + 0.71 + ], + "angle": 0, + "content": "5 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.724, + 0.827, + 0.781 + ], + "angle": 0, + "content": "The experimental section evaluates Perception-R1's performance on visual perception tasks (§ 5.1), followed by analytical experiments exploring reinforcement learning (RL)'s role in perception policy learning (§ 5.2). Finally, it discusses the interplay between visual perception and RL, along with key insights for perception policy learning (§ 5.3)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.797, + 0.523, + 0.812 + ], + "angle": 0, + "content": "5.1 Performance Landscape in Perception Tasks" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.822, + 0.826, + 0.865 + ], + "angle": 0, + "content": "We evaluate Perception-R1 on mainstream perception tasks: visual grounding, counting, OCR, and object detection. Experiments use the datasets described in § 4.3 and benchmarks for image understanding. Results are in Tables 1-4. See Appendix A.2 for details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Visual Grounding is a task that involves localizing visual objects based on linguistic descriptions. Specifically, given a language prompt, the model is required to output the spatial coordinates of the subject (typically a single entity) described in the prompt. As shown in Table 1, we evaluate" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.179, + 0.09, + 0.487, + 0.188 + ], + "angle": 0, + "content": "
methodsizeVisual Counting
PixmovalPixmotoetest
LLaVA-1.5 [35]7B33.331.0
LLaVA-1.6 [58]7B32.731.9
LLaVA-OV [28]7B55.853.7
Qwen2-VL [61]2B60.250.5
Perception-R12B78.175.6
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.173, + 0.19, + 0.487, + 0.214 + ], + "angle": 0, + "content": "(a) Visual counting evaluation on Pixmo-Count [13] \nval set and test set." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.09, + 0.822, + 0.187 + ], + "angle": 0, + "content": "
methodsizeepochObject Detection
AP\\( AP_{50} \\)\\( AP_{75} \\)
YOLOv3 [51]-27327.949.228.3
Faster-RCNN [52]-1235.655.737.9
DETR [6]41M50042.062.444.2
Qwen2.5-VL [3]3B116.123.716.7
Perception-R1†3B131.946.733.4
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.505, + 0.189, + 0.82, + 0.214 + ], + "angle": 0, + "content": "(b) Object detection evaluation on COCO2017 [32] validation set." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.219, + 0.827, + 0.262 + ], + "angle": 0, + "content": "Table 3: Mainstream visual tasks evaluation including (a) visual object counting and (b) challenging general object detection. Notably, the results of expert model in (b) are copied from MMDetection [7]. \\(\\dagger\\) means Perception-R1 for object detection is build based on Qwen2.5-VL-3B-Instruct [3]." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.271, + 0.819, + 0.358 + ], + "angle": 0, + "content": "
llmMMBenchMMVetMMStar ScienceQASeedBenchMMELLaVA-BenchAI2D
AvgAvgAvgAvgAvgAvgAvgCognitionPerceptionAvgAvg
LLaVA1.5 [35]Vicuna1.5-7B62.832.832.665.460.1302.11338.352.651.9
LLaVA-NeXT [36]Vicuna1.5-7B66.037.937.768.269.1195.71419.552.767.4
Qwen2-VL [61]Qwen2-2B71.945.646.374.072.7418.51471.146.571.6
Perception-R1Qwen2-2B71.848.945.773.473.0430.01473.958.271.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.363, + 0.828, + 0.419 + ], + "angle": 0, + "content": "Table 4: General image understanding and reasoning evaluation, compared with various baselines. We select 8 mainstream multimodal benchmarks, i.e., MMBench [38], MMVet [72], MMStar [9], ScienceQA [53], SeedBench [18], MME [16], LLaVA-Bench [37], and ai2D [26] for the comprehensive understanding. We use the model after RL training in the counting tasks for the eval." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.436, + 0.825, + 0.534 + ], + "angle": 0, + "content": "Perception-R1 on three mainstream benchmarks, refCOCO / + / g, and report Acc@0.5, Acc@0.75, and Acc@0.95 to comprehensively assess its visual grounding capability. We surprisingly find that several SoTA MLLMs exhibit poor performance on the more challenging Acc@0.95 metric, with scores even below 1%. In contrast, Perception-R1 achieves a stable performance of over 30% on this metric. This observation suggests that the community should prioritize reporting more discriminative results in future evaluations. The experimental results demonstrate that Perception-R1 exhibits strong competitiveness compared to both specialized and general-purpose models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.539, + 0.827, + 0.679 + ], + "angle": 0, + "content": "Optical Character Recognition (OCR) represents a critical task in visual perception due to its substantial practical value. Current methodologies predominantly adopt either expert models or fine-tuned generalist models for OCR. Perception-R1 pioneers the utilization of RL to further unlock the OCR capabilities of MLLM. As shown in Table 2, our proposed Perception-R1 achieves SoTA performance on the highly challenging OCR benchmark, i.e., PageOCR [34], demonstrating significant superiority over existing expert models, e.g., GOT (98.1 vs. 97.2 F1-score) and robust generalist models, e.g., LLaVA-NeXT (98.1 vs. 64.7 F1-score). 
Notably, Perception-R1 does not use the Chinese OCR data for training so it is a zero-shot performance for Chinese metric. This breakthrough substantiates the formidable potential of RL applications in OCR tasks, establishing new frontiers for enhancing textual understanding and recognition in complex visual environments." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.684, + 0.827, + 0.81 + ], + "angle": 0, + "content": "Visual Counting, as a fundamental vision task, necessitates models to accurately quantify category-specific instances within images, requiring robust visual logic to identify and enumerate targets through structured recognition patterns. In Perception-R1, we adopt a detect-then-count paradigm that reformulates the counting problem into a point detection process. As shown in Table 3a, Perception-R1 achieves remarkable counting performance, surpassing the current strong baselines by a substantial margin (17.9% improvement compared to Qwen2-VL in Pixmo val set). This advancement substantiates that RL effectively stimulates models to explore intrinsic visual logic mechanisms (Although counting yields deterministic results, the sequence of counting can exhibit distinct patterns.), thereby enhancing their capacity to resolve complex vision tasks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.815, + 0.828, + 0.913 + ], + "angle": 0, + "content": "General Object Detection, widely regarded as the crown jewel of computer vision tasks, has long been considered one of the most challenging problems in visual perception. As a pioneering endeavor to integrate RL into object detection, Perception-R1 achieves a groundbreaking milestone, serving as the first pure MLLM to surpass the \\(30+\\) AP threshold, i.e., 31.9 AP in Table 3b, on the COCO 2017 val set, matching or even exceeding the performance of specialized expert models. 
This achievement underscores rule-based RL's immense potential in addressing complex vision tasks requiring sophisticated visual-logic integration." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.202, + 0.088, + 0.795, + 0.219 + ], + "angle": 0, + "content": "
caseVisual GroundingOCR PageOCRVisual CountingDetection COCO2017
RefCOCORefCOCO+RefCOCOgPixmovalPixmotest
Perception-R189.181.785.798.478.175.631.9
w/o reward matching----77.175.423.5
w/o RL86.877.183.394.460.250.516.1
w thinking75.167.971.377.374.972.825.7
w/o thinking89.181.785.795.778.175.628.1
RL only89.181.785.795.778.175.631.9
SFT only88.280.784.695.358.059.925.9
SFT+RL88.480.785.197.377.175.430.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.221, + 0.825, + 0.29 + ], + "angle": 0, + "content": "Table 5: Ablation Study of Perception-R1. We perform ablation studies to investigate key properties of Perception-R1 across a range of visual perception tasks. Specifically, we report the Acc@0.5 for RefCOCO / + / g val set, the F1-score for PageOCR, the average scores for Pixmo-Count, and the AP metric for COCO2017 val set. w/o means without. Notably, there is no reward matching applied to visual grounding and OCR tasks, as these tasks do not involve the multi-subject reward." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.292, + 0.825, + 0.393 + ], + "angle": 0, + "content": "
reward functionCOCO2017
AP\\( AP_{50} \\)\\( AP_{75} \\)
format reward---
format reward + location reward (IoU)18.825.320.1
format reward + location reward (IoU) + cls reward20.227.321.4
format reward + location reward (IoU) + cls reward + recall reward (F1)27.642.028.7
format reward + location reward (IoU) + cls reward + recall reward (F1) + missing reward28.142.029.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.396, + 0.825, + 0.438 + ], + "angle": 0, + "content": "Table 6: Reward design analysis of Perception-R1. pls reward indicates binary classification reward and missing reward is a penalty to penalize missed detections. To facilitate rapid experimentation, we randomly sampled 10k data from COCO2017 train set for this experiment." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.445, + 0.827, + 0.515 + ], + "angle": 0, + "content": "General Visual Comprehension extends beyond pure perceptual tasks, and we evaluate Perception-R1 on multiple multimodal benchmarks. As shown in Table 4, we observe an intriguing phenomenon that models trained with RL for vision-specific tasks, e.g., counting task, exhibit concurrent performance gains in generic comprehension benchmarks. We attribute this cross-task enhancement to the perception policy learning, which drives the model to discover superior image interpretation patterns." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.533, + 0.441, + 0.548 + ], + "angle": 0, + "content": "5.2 Ablation Study of Perception-R1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.56, + 0.827, + 0.603 + ], + "angle": 0, + "content": "In this section, we aim to conduct a comprehensive ablation study to systematically investigate the contributions of critical components within Perception-R1. Experimental results are shown in Table 5. From the experimental results, we can derive three principal empirical findings:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.608, + 0.825, + 0.692 + ], + "angle": 0, + "content": "Reward matching enhances the explorability of multi-subject visual perception. As evidenced by the comparative results between row 1 and 2 in Table 5, replacing the bipartite matching with sequential matching leads to substantial performance degradation in both visual counting and object detection task. 
This suggests that sequential matching constrains the RL exploration space. In contrast, the bipartite matching mechanism provides more possibilities in reward assignment, enabling the model to explore optimal visual perception patterns." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.698, + 0.827, + 0.795 + ], + "angle": 0, + "content": "Explicit thinking processes prove non-essential for contemporary visual perception. Comparative analysis of row 3 and 4 reveals consistent performance degradation across all four evaluated perception tasks when incorporating an explicit thinking process during both training and inference phases. A similar phenomenon also emerges in image classification tasks [30]. We posit that this phenomenon arises because current visual perception tasks are more oriented toward visual logic rather than semantic logic. This shift implies that explicit language-centered reasoning processes are unnecessary, as models tend to focus more on learning implicit visual patterns." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Perceptual perplexity determines RL superiority over SFT. We compare the different combinations of post-training method, i.e., SFT, RL, and \\(\\mathrm{SFT + RL}\\), across four perception tasks, as shown in row 6, 7, 8 of Table 5. In tasks with high perceptual perplexity, such as counting and detection (multiple objects and categories), RL demonstrates superior performance enhancement compared to SFT or even \\(\\mathrm{SFT + RL}\\). Conversely, in low-perplexity tasks such as grounding and OCR, RL underperforms relative to SFT or \\(\\mathrm{SFT + RL}\\). This indicates that high perceptual perplexity is a significant factor influencing the effectiveness of RL. It suggests that RL techniques should be applied to tasks with greater perceptual perplexity, where the exploration space for perception policy is larger." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.089, + 0.332, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.191, + 0.211, + 0.323, + 0.225 + ], + "angle": 0, + "content": "(a) Grounding reward" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.089, + 0.496, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.336, + 0.212, + 0.501, + 0.225 + ], + "angle": 0, + "content": "(b) Grounding performance" + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.09, + 0.653, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.519, + 0.212, + 0.641, + 0.225 + ], + "angle": 0, + "content": "(c) Counting reward" + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.09, + 0.816, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.663, + 0.212, + 0.819, + 0.225 + ], + "angle": 0, + "content": "(d) Counting performance" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.228, + 0.825, + 0.285 + ], + "angle": 0, + "content": "Figure 2: Scalability analysis of Perception-R1. We select two primary tasks: grounding and counting. We visualize the training reward curves under varying numbers of rollouts and evaluate the final performance of each task. All experiments are conducted with \\(5k\\) sampled data. And the default rollout number setting \\((1\\times)\\) is 8." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.306, + 0.38, + 0.321 + ], + "angle": 0, + "content": "5.3 More In-depth Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.336, + 0.826, + 0.366 + ], + "angle": 0, + "content": "In this section, we explore several key properties of Perception-R1 to further enhance our understanding of Perception Policy Learning with RL." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.371, + 0.825, + 0.496 + ], + "angle": 0, + "content": "Analysis of reward design for perception policy learning. We introduced the details of reward function of Perception-R1 in § 4.3. In this part, we examine the influence of these reward functions on perception policy learning. Specifically, using object detection as a case study, we incrementally integrate the designed answer reward into the format reward, as illustrated in Table 6. The results indicate that the progressive introduction of refined reward functions leads to consistent improvements in detection performance, ultimately exceeding the performance of expert models. This underscores the critical role of reward design in perception policy learning. Furthermore, it identifies a promising avenue for future research: the development of more refined and task-specific reward functions to enhance perception policy learning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.502, + 0.825, + 0.627 + ], + "angle": 0, + "content": "Analysis of scaling up rollout for perception policy learning. The scalability of RL is a key concern of existing LLM post-training. In this part, we analyze the scalability of Perception-R1, focusing specifically on scaling up the number of rollouts. As shown in Figure 2, we conduct rollout-scaling experiments in two tasks: visual grounding and visual counting. The results indicate that increasing rollout count enhances reward optimization and final performance. This demonstrates Perception-R1's strong scaling properties and underscores the critical role of rollout quantity in scaling perception policies. By generating sufficient rollouts, the model broadens its exploration space, increasing the diversity of candidate solutions for reward evaluation. This expansion accelerates convergence to optimal visual perception patterns." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.659, + 0.431, + 0.675 + ], + "angle": 0, + "content": "6 Limitation and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.697, + 0.827, + 0.81 + ], + "angle": 0, + "content": "\"What can RL bring to MLLM?\" has been a public question since the proposal of DeepSeek-R1. Several latest works attempt to apply RL from the perspective of language-centric visual reasoning [39, 15, 41]. However, in this paper, we take a different pathway and argue that perception is a crucial prerequisite for visual reasoning. Only by fully unlocking the perception patterns of MLLMs can the models possess the ability to reason about complex visual tasks. Nevertheless, we regrettably find that many current perception tasks are overly simplistic, which limits the exploration space for RL. This, in turn, restricts the possibility of MLLMs achieving a perceptual \"Aha moment\" through thinking process. Finding more appropriate perception tasks, aka., meta task, may be the key to addressing this issue." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.825, + 0.913 + ], + "angle": 0, + "content": "In summary, this work takes a pioneering step in exploring the potential of rule-based RL in MLLM post-training for perception policy learning. Through extensive experimental analysis, we establish several valuable insights about perception policy learning with RL. Driven by these findings, we build Perception-R1, a simple, effective, and scalable RL framework for efficient perception policy learning. Perception-R1 sets new SoTAs across multiple visual perception tasks, particularly in object detection tasks. By introducing a novel paradigm, it achieves and even surpasses the performance of expert models, thereby demonstrating the significant potential of perception policy learning." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.113, + 0.826, + 0.143 + ], + "angle": 0, + "content": "[1] Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.151, + 0.825, + 0.193 + ], + "angle": 0, + "content": "[2] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.202, + 0.826, + 0.244 + ], + "angle": 0, + "content": "[3] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.253, + 0.825, + 0.283 + ], + "angle": 0, + "content": "[4] Lukas Blecher, Guillem Cucurull, Thomas Scialom, and Robert Stojnic. Nougat: Neural optical understanding for academic documents. arXiv preprint arXiv:2308.13418, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.291, + 0.827, + 0.346 + ], + "angle": 0, + "content": "[5] Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski, Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, et al. Rt-2: Vision-language-action models transfer web knowledge to robotic control. arXiv preprint arXiv:2307.15818, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.355, + 0.825, + 0.399 + ], + "angle": 0, + "content": "[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.407, + 0.825, + 0.478 + ], + "angle": 0, + "content": "[7] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, Zheng Zhang, Dazhi Cheng, Chenchen Zhu, Tianheng Cheng, Qijie Zhao, Buyu Li, Xin Lu, Rui Zhu, Yue Wu, Jifeng Dai, Jingdong Wang, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. MMDetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.485, + 0.827, + 0.528 + ], + "angle": 0, + "content": "[8] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.537, + 0.825, + 0.58 + ], + "angle": 0, + "content": "[9] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.588, + 0.827, + 0.631 + ], + "angle": 0, + "content": "[10] Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.64, + 0.825, + 0.695 + ], + "angle": 0, + "content": "[11] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale N Fung, and Steven Hoi. Instructclip: Towards general-purpose vision-language models with instruction tuning. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.704, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[12] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiiping Yu, Shunfeng Zhou, Shuting Pan, S. S. 
Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.113, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.207, + 0.091, + 0.827, + 0.299 + ], + "angle": 0, + "content": "T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yaohui Li, Yi Zheng, Yuchen Zhu, Yunxian Ma, Ying Tang, Yukun Zha, Yuting Yan, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhengyan Zhang, Zhewen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, Zilin Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.309, + 0.826, + 0.365 + ], + "angle": 0, + "content": "[13] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. 
arXiv preprint arXiv:2409.17146, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.375, + 0.826, + 0.432 + ], + "angle": 0, + "content": "[14] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, Xiangwen Kong, Xiangyu Zhang, Kaisheng Ma, and Li Yi. DreamLLM: Synergistic multimodal comprehension and creation. In The Twelfth International Conference on Learning Representations, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.441, + 0.825, + 0.484 + ], + "angle": 0, + "content": "[15] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.493, + 0.826, + 0.548 + ], + "angle": 0, + "content": "[16] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Zhenyu Qiu, Wei Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.56, + 0.825, + 0.589 + ], + "angle": 0, + "content": "[17] Zitian Gao, Boye Niu, Xuzheng He, Haotian Xu, Hongzhang Liu, Aiwei Liu, Xuming Hu, and Lijie Wen. Interpretable contrastive monte carlo tree search reasoning, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.598, + 0.825, + 0.627 + ], + "angle": 0, + "content": "[18] Yuying Ge, Sijie Zhao, Ziyun Zeng, Yixiao Ge, Chen Li, Xintao Wang, and Ying Shan. Making llama see and draw with seed tokenizer. arXiv preprint arXiv:2310.01218, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.636, + 0.401, + 0.652 + ], + "angle": 0, + "content": "[19] GPT-4o. Hello gpt-4o, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.661, + 0.827, + 0.69 + ], + "angle": 0, + "content": "[20] Kaiming He, Georgia Gkioxari, Piotr Dólár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.699, + 0.827, + 0.742 + ], + "angle": 0, + "content": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.751, + 0.827, + 0.808 + ], + "angle": 0, + "content": "[22] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, et al. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14281-14290, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.818, + 0.825, + 0.86 + ], + "angle": 0, + "content": "[23] Anwen Hu, Haiyang Xu, Jiabo Ye, Ming Yan, Liang Zhang, Bo Zhang, Chen Li, Ji Zhang, Qin Jin, Fei Huang, et al. mplug-docowl 1.5: Unified structure learning forocr-free document understanding. arXiv preprint arXiv:2403.12895, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[24] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.172, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[25] Aishwarya Kamath, Mannat Singh, Yann LeCun, Gabriel Synnaeve, Ishan Misra, and Nicolas Carion. Mdetr-modulated detection for end-to-end multi-modal understanding. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1780-1790, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.826, + 0.202 + ], + "angle": 0, + "content": "[26] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part IV 14, pages 235-251. Springer, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.209, + 0.825, + 0.24 + ], + "angle": 0, + "content": "[27] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97, 1955." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.825, + 0.29 + ], + "angle": 0, + "content": "[28] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.298, + 0.824, + 0.33 + ], + "angle": 0, + "content": "[29] Jinyang Li, En Yu, Sijia Chen, and Wenbing Tao. Ovtr: End-to-end open-vocabulary multiple object tracking with transformer. arXiv preprint arXiv:2503.10616, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.338, + 0.827, + 0.368 + ], + "angle": 0, + "content": "[30] Ming Li, Shitian Zhao, Jike Zhong, Yuxiang Lai, and Kaipeng Zhang. Cls-rl: Image classification with rule-based reinforcement learning. arXiv preprint arXiv:2503.16188, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.375, + 0.825, + 0.407 + ], + "angle": 0, + "content": "[31] Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.414, + 0.827, + 0.472 + ], + "angle": 0, + "content": "[32] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.48, + 0.827, + 0.523 + ], + "angle": 0, + "content": "[33] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.532, + 0.827, + 0.576 + ], + "angle": 0, + "content": "[34] Chenglong Liu, Haoran Wei, Jinyue Chen, Lingyu Kong, Zheng Ge, Zining Zhu, Liang Zhao, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Focus anywhere for fine-grained multi-page document understanding. arXiv preprint arXiv:2405.14295, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.584, + 0.825, + 0.628 + ], + "angle": 0, + "content": "[35] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.636, + 0.827, + 0.667 + ], + "angle": 0, + "content": "[36] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, January 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.674, + 0.825, + 0.705 + ], + "angle": 0, + "content": "[37] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.713, + 0.827, + 0.757 + ], + "angle": 0, + "content": "[38] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.765, + 0.827, + 0.807 + ], + "angle": 0, + "content": "[39] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.817, + 0.825, + 0.861 + ], + "angle": 0, + "content": "[40] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11–20, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[41] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. 
Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[42] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.648, + 0.158 + ], + "angle": 0, + "content": "[43] OpenAI. Chatgpt. https://openai.com/blog/chatgpt, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.165, + 0.69, + 0.181 + ], + "angle": 0, + "content": "[44] OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.188, + 0.581, + 0.204 + ], + "angle": 0, + "content": "[45] OpenAI. Learning to reason with llms, September 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.211, + 0.827, + 0.267 + ], + "angle": 0, + "content": "[46] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.275, + 0.827, + 0.331 + ], + "angle": 0, + "content": "[47] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. 
Advances in Neural Information Processing Systems, 35:27730-27744, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.34, + 0.825, + 0.383 + ], + "angle": 0, + "content": "[48] Yuang Peng, Yuxin Cui, Haomiao Tang, Zekun Qi, Runpei Dong, Jing Bai, Chunrui Han, Zheng Ge, Xiangyu Zhang, and Shu-Tao Xia. Dreambench++: A human-aligned benchmark for personalized image generation. arXiv preprint arXiv:2406.16855, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.39, + 0.827, + 0.447 + ], + "angle": 0, + "content": "[49] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.455, + 0.827, + 0.498 + ], + "angle": 0, + "content": "[50] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.506, + 0.825, + 0.534 + ], + "angle": 0, + "content": "[51] Joseph Redmon and Ali Farhadi. Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.542, + 0.827, + 0.585 + ], + "angle": 0, + "content": "[52] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE transactions on pattern analysis and machine intelligence, 39(6):1137-1149, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.593, + 0.827, + 0.636 + ], + "angle": 0, + "content": "[53] Tanik Saikh, Tirthankar Ghosal, Amish Mittal, Asif Ekbal, and Pushpak Bhattacharyya. 
Scienceqa: A novel resource for question answering on scholarly articles. International Journal on Digital Libraries, 23(3):289-301, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.644, + 0.825, + 0.673 + ], + "angle": 0, + "content": "[54] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.681, + 0.827, + 0.724 + ], + "angle": 0, + "content": "[55] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.731, + 0.827, + 0.761 + ], + "angle": 0, + "content": "[56] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.768, + 0.827, + 0.811 + ], + "angle": 0, + "content": "[57] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.818, + 0.827, + 0.862 + ], + "angle": 0, + "content": "[58] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.869, + 0.827, + 0.912 + ], + "angle": 0, + "content": "[59] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "[60] Fei Wang, Wenxuan Zhou, James Y Huang, Nan Xu, Sheng Zhang, Hoifung Poon, and Muhao Chen. mdpo: Conditional preference optimization for multimodal large language models. arXiv preprint arXiv:2406.11839, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.825, + 0.186 + ], + "angle": 0, + "content": "[61] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.194, + 0.827, + 0.251 + ], + "angle": 0, + "content": "[62] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International conference on machine learning, pages 23318-23340. PMLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.259, + 0.827, + 0.315 + ], + "angle": 0, + "content": "[63] Haoran Wei, Lingyu Kong, Jinyue Chen, Liang Zhao, Zheng Ge, Jinrong Yang, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. 
Vary: Scaling up the vision vocabulary for large vision-language model. In European Conference on Computer Vision, pages 408-424. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.324, + 0.827, + 0.368 + ], + "angle": 0, + "content": "[64] Haoran Wei, Lingyu Kong, Jinyue Chen, Liang Zhao, Zheng Ge, En Yu, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Small language model meets with reinforced vision vocabulary. arXiv preprint arXiv:2401.12503, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.375, + 0.827, + 0.419 + ], + "angle": 0, + "content": "[65] Haoran Wei, Chenglong Liu, Jinyue Chen, Jia Wang, Lingyu Kong, Yanming Xu, Zheng Ge, Liang Zhao, Jianjian Sun, Yuang Peng, et al. GeneralOCR theory: TowardsOCR-2.0 via a unified end-to-end model. arXiv preprint arXiv:2409.01704, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.427, + 0.827, + 0.47 + ], + "angle": 0, + "content": "[66] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.478, + 0.827, + 0.535 + ], + "angle": 0, + "content": "[67] Huajian Xin, Z. Z. Ren, Junxiao Song, Zhihong Shao, Wanjia Zhao, Haocheng Wang, Bo Liu, Liyue Zhang, Xuan Lu, Qiushi Du, Wenjun Gao, Qihao Zhu, Dejian Yang, Zhibin Gou, Z. F. Wu, Fuli Luo, and Chong Ruan. Deepseek-prover-v1.5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.543, + 0.825, + 0.587 + ], + "angle": 0, + "content": "[68] En Yu, Kangheng Lin, Liang Zhao, Yana Wei, Zining Zhu, Haoran Wei, Jianjian Sun, Zheng Ge, Xiangyu Zhang, Jingyu Wang, et al. Unhackable temporal rewarding for scalable video mllms. arXiv preprint arXiv:2502.12081, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.595, + 0.827, + 0.638 + ], + "angle": 0, + "content": "[69] En Yu, Tiancai Wang, Zhuoling Li, Yang Zhang, Xiangyu Zhang, and Wenbing Tao. Motrv3: Releasefetch supervision for end-to-end multi-object tracking. arXiv preprint arXiv:2305.14298, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.646, + 0.825, + 0.689 + ], + "angle": 0, + "content": "[70] En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.697, + 0.827, + 0.755 + ], + "angle": 0, + "content": "[71] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 69-85. Springer, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.762, + 0.827, + 0.806 + ], + "angle": 0, + "content": "[72] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.814, + 0.825, + 0.858 + ], + "angle": 0, + "content": "[73] Liang Zhao, En Yu, Zheng Ge, Jinrong Yang, Haoran Wei, Hongyu Zhou, Jianjian Sun, Huang Peng, Runpei Dong, Chunrui Han, et al. Chatspot: Bootstrapping multimodal llms via precise referring instruction tuning. arXiv preprint arXiv:2307.09474, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.866, + 0.825, + 0.909 + ], + "angle": 0, + "content": "[74] Zining Zhu, Liang Zhao, Kangheng Lin, Jinze Yang, En Yu, Chenglong Liu, Haoran Wei, Jianjian Sun, Zheng Ge, and Xiangyu Zhang. 
Perpo: Perceptual preference optimization via discriminative rewarding. arXiv preprint arXiv:2502.04371, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.909 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.091, + 0.293, + 0.108 + ], + "angle": 0, + "content": "A Appendix" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.825, + 0.164 + ], + "angle": 0, + "content": "In this appendix, we provide additional details about Perception-R1, which are omitted due to the 9-page limit of the main paper. Specifically, Section A.1 elaborates on the detailed dataset and training settings. Section A.2 presents more experimental results." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.18, + 0.541, + 0.195 + ], + "angle": 0, + "content": "A.1 Additional Details about Experimental Setting" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.205, + 0.825, + 0.247 + ], + "angle": 0, + "content": "More detailed dataset information of Perception-R1. In Section 4.3, we introduced what data was used for RL post-training of Perception-R1 on which tasks. In this part, we will provide more detailed information about the datasets, as shown in Table 7." + }, + { + "type": "table", + "bbox": [ + 0.245, + 0.258, + 0.75, + 0.345 + ], + "angle": 0, + "content": "
tasksdatasetsOriginalUsedRatio
visual groundingRefCOCO / RefCOCO+ / RefCOCOg320k5k1.56%
OCRPageOCR50k5k10%
visual countingPixMo-Count1.9M10k0.5%
object detectionCOCO2017110k110k100%
overall-2.38M130k-
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.351, + 0.825, + 0.381 + ], + "angle": 0, + "content": "Table 7: Training dataset statistics. Notably, we do not mix the data from different perception tasks for joint training because the rewards for different tasks vary." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.395, + 0.825, + 0.438 + ], + "angle": 0, + "content": "More detailed training setting information of Perception-R1. Section 4.3 elaborates on several key parameters of Perception-R1. In this part, we further demonstrate the diverse prompts employed for distinct perception tasks, as shown in Table 8." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.45, + 0.825, + 0.524 + ], + "angle": 0, + "content": "
taskssystem promptuser prompt
visual groundingQwen2-VLOutput the bounding box of the {question} in the image.
OCRQwen2-VLOCR this image.
visual countingQwen2-VLOutput all the bounding boxes of the {label}
object detectionQwen2.5-VLPlease output bbox coordinates and names of {90 categories of COCO}.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.529, + 0.825, + 0.56 + ], + "angle": 0, + "content": "Table 8: Prompts of Perception-R1. The system prompt of Perception-R1 follows Qwen2-VL [61] and Qwen2.5-VL [3]." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.582, + 0.446, + 0.598 + ], + "angle": 0, + "content": "A.2 Additional Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.608, + 0.825, + 0.636 + ], + "angle": 0, + "content": "In this section, we provide more qualitative analysis of Perception-R1 on multiple visual perception tasks. The selected cases are shown in Figure 3-6." + }, + { + "type": "image", + "bbox": [ + 0.21, + 0.651, + 0.788, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.286, + 0.87, + 0.71, + 0.884 + ], + "angle": 0, + "content": "Figure 3: Demo case of Percpetion-R1 on visual counting task." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.183, + 0.153, + 0.22, + 0.165 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "image", + "bbox": [ + 0.244, + 0.159, + 0.309, + 0.167 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.168, + 0.504, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.245, + 0.183, + 0.302, + 0.207 + ], + "angle": 0, + "content": "Riding Dirty" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.209, + 0.312, + 0.232 + ], + "angle": 0, + "content": "A muddy mix of road \nThe wind is a bit \nCyclocross doubles the \nthrill of both sports. Here's \nthe gear to get you started." 
+ }, + { + "type": "text", + "bbox": [ + 0.246, + 0.236, + 0.288, + 0.241 + ], + "angle": 0, + "content": "by BERNSTEIN 100VY" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.244, + 0.303, + 0.298 + ], + "angle": 0, + "content": "Cyclosis is a cool term. It means to drop-hotelize bikes with little or no need to walk. It is a course that often includes a variety of exercises, such as as well as obstacles that force you to get your legs on the ground. A bike over your shoulder. \"All you need is a bike and have a good attitude and confidence in your ability to walk,\" says Stu Thorne, founder and CEO of the professional cyclosis team." + }, + { + "type": "text", + "bbox": [ + 0.351, + 0.251, + 0.412, + 0.257 + ], + "angle": 0, + "content": "BEST PCHENTY LEVEL" + }, + { + "type": "text", + "bbox": [ + 0.353, + 0.258, + 0.495, + 0.28 + ], + "angle": 0, + "content": "What you canificn with extra weight you make up for with a highly capable automobile frame that you'll want to keep rolling between trips. A carbon-bond tire helps eat and move the vehicle. The car also has a high quality底盘, an excellent底盘, and a 100%底盘 driven power through any grade. And TBP cable disc brakes perform well even when the vehicle is on the road. The steering system is also very useful to consider if you're looking for a bike that can travel all way." + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.285, + 0.428, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.431, + 0.285, + 0.498, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.431, + 0.308, + 0.498, + 0.359 + ], + "angle": 0, + "content": "A premium neck bike, this should be your next choice. The Carbone Factory offers a 2000 inch, 16-in. front bottom bracket and relatively easy to install. It's also suitable for hard, solid, hard or soft through cracks. This is the best way to get one of these items. 
They can be run tubes to better make it easier to use. They mean they spring freely when called on to do so. They are lightweight and fast- and something you won't have to deal with in special area $3,000" + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.369, + 0.291, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.406, + 0.286, + 0.442 + ], + "angle": 0, + "content": "Craft Shield Glove \nGlove \ngloves \ngloves from sailor \ngloves from sailor \ngloves from sea \ngloves from sea \ngloves from sea \ngloves from sea \ngloves from sea \ngloves from sea" + }, + { + "type": "image", + "bbox": [ + 0.3, + 0.369, + 0.34, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.299, + 0.406, + 0.338, + 0.442 + ], + "angle": 0, + "content": "Darn Tough Microer Crews can be a great way to complete your cyclical routines you know. The best way to get the best, fight, strong and cool are to do so with the \"power of the soul.\"" + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.371, + 0.397, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.347, + 0.406, + 0.419, + 0.442 + ], + "angle": 0, + "content": "Park Tool Rapho Brush Set Leng W 10mm 2. This tool usually dirts the skin and acts as a light source for key components. This kit the teeth are used to clean your bony look cheek off. You would need to purchase part.com e600 regal.co.uk" + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.369, + 0.441, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.4, + 0.406, + 0.446, + 0.442 + ], + "angle": 0, + "content": "Rapha Arm and Leg Warmers \nA has a lot of Lycia for the warm season. \nThe warm weather up, etc. \nThe warm air is coming from the warm air. 
\nLycia from E70" + }, + { + "type": "image", + "bbox": [ + 0.452, + 0.369, + 0.498, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.452, + 0.406, + 0.498, + 0.442 + ], + "angle": 0, + "content": "Topeak SmartGaUGE D2 1000000000000000000000000000000000000000000000000000000000000000" + }, + { + "type": "text", + "bbox": [ + 0.333, + 0.445, + 0.413, + 0.45 + ], + "angle": 0, + "content": "MEN'S JOURNAL 94 NOVEMBER 2016" + }, + { + "type": "title", + "bbox": [ + 0.183, + 0.461, + 0.239, + 0.472 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.477, + 0.37, + 0.489 + ], + "angle": 0, + "content": "OCR this image." + }, + { + "type": "title", + "bbox": [ + 0.183, + 0.5, + 0.27, + 0.512 + ], + "angle": 0, + "content": "Perception-R1:" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.519, + 0.816, + 0.797 + ], + "angle": 0, + "content": " G E A R L A B\\nRiding Dirty\\n\\nA muddy mix of road- and mountain-bike racing, cyclocross doubles the thrill of both sports. Here's the gear to get you started.\\n\\nBryne Bredy\\n\\nCyclocross is as cool as it looks: Racers pedal drop-handlebikes with trail-ready tires through a course that often includes pavement, dirt, mud, and grass, as well as obstacles that force you to dismount and sling your bike over your shoulder. \"All you really need is to be tough and have a good attitude and a desire to suffer a little bit,\" says Stu Thorne, founder and director of the Cannondale professional cyclocross team. The right gear helps, too.\\nBest for Entry Level\\n\\nCAADX 105\\n\\nWhat you sacrifice with extra weight you make up for with a highly capable aluminum frame that'll keep you going between races. A carbon-blade fork helps eat up some of the pounding the courses can dish out. The dependable 2x11 speed Shimano 105 drivetrain powers through any grade, and TRP cable disc brakes perform well even in mud. 
Meanwhile, the 35cc tires and stable geometry make the CAADX a superfun commuter if you're looking for a bike that can multitask\\n\\nCannondale.com \\(\\mathbb{S}1,970\\backslash \\mathbb{n}\\backslash \\mathbb{n}\\)Best for Experienced Cyclists\\n\\nCruX Elite X1\\n\\nIf you already log long miles on a premium road bike, this should be your new cross rig. The carbon frame is as light as it is.\\n\\nLow bottom bracket and relatively slack head angle, which make it stable, helped us sail through rocks and roots and corner quickly. The tires can be run tubeless to better resist flats, and extra clearance means they spin freely when caked with mud. The CruX Elite is playful and fast — and something you won't outgrow as you collect medals.\\n\\nGlove\\n\\nThe cross season typically runs from September through February, so you'll need hearty gloves like these, with a fleece lining and a waterproof base, for warmth on wet race days. craftsports.us \\(\\mathbb{S}78\\backslash \\mathbb{n}\\backslash \\mathbb{N}\\)Darn Tough Micro Crew\\n\\nUnlike other bike races, cyclocross requires you to be on foot at times. So light, strong socks are key. These aren't likely to wear out, but Darn Tough will replace them if they do. darntough.com \\(\\mathbb{S}18\\backslash \\mathbb{n}\\backslash \\mathbb{N}\\)Park Tool Brush Set\\n\\nThe mud, dirt, and grime that builds up during off-road rides can damage key components. This kit does more than just keep your bike looking fresh; it keeps it healthy, too. parktool.com \\(\\mathbb{S}80\\backslash \\mathbb{n}\\backslash \\mathbb{n}\\)Rapha Arm and Leg Warmer\\n\\nThese merino layers, which have a bit of Lycra for stretch, peel off easily when the weather warms up. And they dry quickly, whether you sweat profusely or get caught in a sudden squall. rapha.cc From \\(\\mathbb{S}70\\backslash \\mathbb{n}\\backslash \\mathbb{N}\\)Topeak SmartGauge D2\\n\\nFor peak performance, adjust your tire pressure to suit the terrain. 
(On soft trails, lower pressure makes it grip better.) The SmartGauge makes it a snap with a fast, readable result. topeak.com \\(\\$ 40\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.32, + 0.838, + 0.676, + 0.853 + ], + "angle": 0, + "content": "Figure 4: Demo case of Percpetion-R1 on OCR task." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.212, + 0.154, + 0.244, + 0.165 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.156, + 0.49, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.496, + 0.156, + 0.537, + 0.166 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "image", + "bbox": [ + 0.542, + 0.156, + 0.783, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.212, + 0.286, + 0.263, + 0.297 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.3, + 0.625, + 0.311 + ], + "angle": 0, + "content": "Output the bounding box of the woman holding a child in the image." + }, + { + "type": "title", + "bbox": [ + 0.212, + 0.321, + 0.289, + 0.332 + ], + "angle": 0, + "content": "Perception-R1:" + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.335, + 0.484, + 0.346 + ], + "angle": 0, + "content": "The woman holding a child(157,459), (208,622)" + }, + { + "type": "image_caption", + "bbox": [ + 0.282, + 0.365, + 0.714, + 0.38 + ], + "angle": 0, + "content": "Figure 5: Demo case of Percpetion-R1 on visual grounding task." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.513, + 0.246, + 0.524 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "image", + "bbox": [ + 0.25, + 0.513, + 0.487, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.495, + 0.514, + 0.535, + 0.525 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "image", + "bbox": [ + 0.54, + 0.514, + 0.777, + 0.636 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.212, + 0.645, + 0.263, + 0.655 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.658, + 0.64, + 0.67 + ], + "angle": 0, + "content": "Please output bbox coordinates and names of {90 categories of COCO}." + }, + { + "type": "title", + "bbox": [ + 0.214, + 0.679, + 0.29, + 0.689 + ], + "angle": 0, + "content": "Perception-R1:" + }, + { + "type": "code", + "bbox": [ + 0.258, + 0.691, + 0.498, + 0.796 + ], + "angle": 0, + "content": "[ {\"bbox_2d\": [435, 1, 616, 415], \"label\": \"person\"}, {\"bbox_2d\": [218, 112, 323, 253], \"label\": \"person\"}, {\"bbox_2d\": [147, 165, 225, 280], \"label\": \"person\"}, {\"bbox_2d\": [183, 126, 239, 242], \"label\": \"person\"}, {\"bbox_2d\": [276, 77, 338, 192], \"label\": \"person\"}, {\"bbox_2d\": [127, 54, 471, 406], \"label\": \"laptop\"}, {\"bbox_2d\": [108, 174, 156, 243], \"label\": \"cell phone\"}, {\"bbox_2d\": [26, 107, 103, 214], \"label\": \"potted plant\"}, {\"bbox_2d\": [64, 29, 280, 137], \"label\": \"chair\"} ]" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.82, + 0.825, + 0.85 + ], + "angle": 0, + "content": "Figure 6: Demo case of Percpception-R1 on general object detection task. The color of bounding boxes correspond to the category." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "17" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_origin.pdf b/data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..23e711614506c84df8b4083f5d92398212993ce9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cf8eb3cd26e2b4bb35c5cf6aed7c79cc499835da8bc2f7d61a60e311db0db46 +size 3735733 diff --git a/data/2025/2504_07xxx/2504.07954/full.md b/data/2025/2504_07xxx/2504.07954/full.md new file mode 100644 index 0000000000000000000000000000000000000000..cd11cce527f4d64dd913cc87726e09b2f13a8d1c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/full.md @@ -0,0 +1,438 @@ +# Perception-R1: Pioneering Perception Policy with Reinforcement Learning + +En Yu $^{1,\mathbb{I}}$ , Kangheng Lin $^{2,\mathbb{I}}$ , Liang Zhao $^{3,\mathbb{I}}$ , Jisheng Yin $^{3}$ , Yana Wei $^{4}$ , Yuang Peng $^{5}$ , Haoran Wei $^{3}$ , Jianjian Sun $^{3}$ , Chunrui Han $^{3}$ , Zheng Ge $^{3}$ , Xiangyu Zhang $^{3}$ , Daxin Jiang $^{3}$ , Jingyu Wang $^{2}$ , Wenbing Tao $^{1\dagger}$ $^{1}$ Huazhong University of Science and Technology + $^{2}$ Beijing University of Posts and Telecommunications + $^{3}$ StepFun + $^{4}$ Johns Hopkins University + ${}^{5}$ Tingshua University +{yuen, wenbingtao}@hust.edu.cn +https://github.com/linkangheng/PR1 + +# Abstract + +Inspired by the success of DeepSeek-R1, we explore the potential of rule-based reinforcement learning (RL) in MLLM post-training for perception policy learning. While promising, our initial experiments reveal that incorporating a thinking process through RL does not consistently lead to performance gains across all visual perception tasks. 
This leads us to delve into the essential role of RL in the context of visual perception. In this work, we return to the fundamentals and explore the effects of RL on different perception tasks. We observe that the perceptual perplexity is a major factor in determining the effectiveness of RL. We also observe that reward design plays a crucial role in further approaching the upper limit of model perception. To leverage these findings, we propose Perceptron-R1, a scalable RL framework using GRPO during MLLM post-training. With a standard Qwen2-VL-2B-Instruct, Perception-R1 achieves $+4.2\%$ on RefCOCO+, $+17.9\%$ on PixMo-Count, $+4.2\%$ on PageOCR, and notably, $31.9\%$ AP on COCO2017 val1 for the first time, establishing a strong baseline for perception policy learning. + +# 1 Introduction + +"We do not see the world as it is, but as we are — or as we are conditioned to see it." + +Stephen R. Covey + +The landscape of large language model (LLM) has undergone a paradigm shift from non-reasoning foundation model, e.g., GPT-4/4o [44, 19], DeepSeek-V3 [33], to strongly reasoning model, e.g., OpenAI o1/o3 [45], DeepSeek-R1 [12], and Kimi-1.5 [57]. DeepSeek-R1, in particular, introduced a simple yet effective rule-based reinforcement learning (RL) approach [55], enabling emergent reasoning patterns without relying on traditional scaffolding techniques such as Monte Carlo Tree Search (MCTS) [17, 67] or Process Reward Models (PRM) [31]. This has catalyzed a new revolution in LLM post-training techniques, prompting researchers to develop more powerful reasoning language models [42, 24]. + +Despite these advancements, current explorations predominantly focus on the purely linguistic domain, and the unimodal nature of these reasoning models limits their ability to engage with the world in a truly perceptive way. To bridge this gap, this work takes a pioneering step in exploring + +the potential of perception policy learning within multimodal LLMs [61, 3] from lens of RL. 
While transferring RL techniques with reasoning processes, i.e., chain-of-thought [66], from the language domain shows promise on certain visual tasks, our empirical studies reveal that this approach is not universally effective. This inevitably prompts us to reexamine the role that RL play in visual perception tasks, and how the utilization of RL can lead to better and scalable perception policy. + +The current understanding of RL as a post-training technique is primarily grounded in purely linguistic tasks [24] and language-centric multimodal tasks [10]. However, the characteristics of visual perception tasks are fundamentally distinct from those of natural language, necessitating a revised understanding of RL in the context of visual perception. Specifically, visual perception possesses two unique properties, as follows: + +- Visual perception is embodied in the objective physical world. It possesses definite physical truth values, e.g., points, lines, or bounding boxes, but it lacks semantics compared to language. +- Visual perception, e.g., visual grounding and counting, are mostly "single-step" direct predictions. It lacks structured reasoning search space for RL exploration. + +These two characteristics determine that the application of RL to visual perception will have different properties from pure language [24] and language-centric multimodal [39, 41] approaches. In this work, we delve into the RL post-training of MLLM in the domain of visual perception, and further complements and extends the above understanding. Through extensive experimental analysis, we have uncovered several bitter yet valuable findings. + +- Explicit thinking process (CoT) during RL is not necessary for current perception policy. (§ 5.2) We observe that the model without thinking process performs better than the one with thinking process. +- Reward design plays a pivotal role in perception policy learning. 
(§ 5.3) An appropriate reward function will lead to a healthier learning curve and explore stronger perceptual patterns of MLLM. +- Perceptual perplexity determines RL superiority over SFT. (§ 5.2) We observe that RL can bring more significant improvement compared to SFT on more complex visual tasks, e.g., object detection. + +Driven by these findings, we present a simple, effective, and scalable RL framework, i.e., Perception-R1, for efficient perception policy learning. Inspired by mainstream language reasoning models [12, 57], Perception-R1 applies rule-based RL algorithm GRPO [55] during MLLM post-training stage. With a vanilla Qwen2-VL-2B-Instruct [61], Perception-R1 achieves significant improvement on multiple visual perception benchmarks, e.g., $+4.2\%$ on RefCOCO+ [40], $+17.9\%$ on PixMoCount [13], and $+4.2\%$ F1-score on PageOCR [34]. More importantly, Perception-R1 serves as the first time to enable a pure MLLM to reach $31.9\%$ mAP on the object detection benchmark COCO2017 [32] va1, showcasing the great potential of general foundation models to surpass expert models in mainstream visual tasks. We hope our method, results, and analysis will inspire future research on perception policy learning with RL. + +# 2 Related Works + +Multimodal Foundation and Reasoning Models. Recently, vision-language models [37, 3, 73, 70] have demonstrated remarkable capabilities in visual comprehension [64, 68] and generation [14, 48] through large-scale pretraining [2, 61] and visual instruction tuning [37, 35]. These models integrate visual modalities into a unified semantic space via visual encoders [49] and adapters [11, 37], while leveraging auto-regressive large language models [59, 1] as decoders for output generation. Despite the advancements in multimodal foundation models, their visual reasoning capabilities remain in an early developmental stage. Recent approaches [8, 39, 41] have explored reinforcement learning (RL) post-training to enhance visual reasoning. 
However, they primarily focus on language-centric tasks such as ambiguous reference resolution [39] and geometric problem-solving [41], while overlooking critical aspects of perception-driven reasoning. In this work, we take a pioneering step in utilizing RL for perception policy learning, aiming to bridge this gap and advance multimodal reasoning. + +Visual Perception in Multimodal Models. Visual Perception, as a concept in the field of computer vision [21, 52, 20, 69, 29], refers to the process of interpreting and understanding sensory, i.e., vision, information from the real-word. In the context of multimodal LLMs (MLLM), visual perception plays a crucial role in enabling the models to integrate, comprehend and reason visual information from the image or video. Existing MLLM generally enhance their visual perception capabilities by + +designing more advanced visual perception architectures [63, 64], more suitable visual-language modeling strategies [70, 68], and more sophisticated post-training techniques [74]. This work aims to explore the potential of further enhancing visual perception from the perspective of RL. + +RL-based Post-training in LLMs and MLLMs. Reinforcement learning (RL) has emerged as a pivotal paradigm for refining LLMs through alignment with human preferences and task-specific objectives. Prominent approaches like Reinforcement Learning from Human Feedback (RLHF) [46] and Direct Preference Optimization (DPO) [50] have demonstrated remarkable success in enhancing safety, coherence, and instruction-following capabilities of LLMs [43, 47, 44] and MLLMs [74, 60]. Recently, rule-based RL techniques, represented by GRPO [55], have demonstrated the potential for large-scale RL applications. LLMs have officially entered the era of strongly reasoning models. Subsequently, MLLMs [8, 39, 41] have also quickly followed this technology. However, so far, there has been no exciting, true "Aha Moment" in the multimodal domain. 
This study aims to investigate the potential contributions of RL to multimodal models, focusing on visual perception. + +# 3 Preliminaries + +Perception Policy Definition. The goal of perception policy in visual-language context is enabling the model to first $(i)$ extract and understand visual information from the environment [37, 68], then $(ii)$ perform logical reasoning based on this understanding [73, 70] to $(iii)$ accomplish specific tasks and further interact with the environment [5, 22]. In this work, we aim to empower the model to deal with a series of pure visual, e.g., counting, detection, and visual-language, e.g., grounding, optical character recognition (OCR), tasks through perception policy learning. + +Group Relative Policy Optimization (GRPO [55]) is a rule-based reinforcement learning algorithm tailored for post-training LLMs. Its core idea is to use group relative rewards to optimize the policy, eliminating the need for a separate critic model [54]. Specifically, GRPO samples multiple outputs $(\mathbf{o}_1 \sim \mathbf{o}_{\mathbf{g}}$ in Figure 1) from the old policy for the same input, calculates the average reward of these outputs as the baseline, and uses the relative rewards to guide policy updates. 
The optimization objective of GRPO can be formulated as following: + +$$ +\mathcal {J} _ {\mathrm {G R P O}} (\theta) = \mathbb {E} _ {[ q \sim P (Q), \{o _ {i} \} _ {i = 1} ^ {G} \sim \pi_ {\theta_ {\text {o l d}}} (O | q) ]} +$$ + +$$ +\frac {1}{G} \sum_ {i = 1} ^ {G} \frac {1}{| o _ {i} |} \sum_ {t = 1} ^ {| o _ {i} |} \left\{\min \left[ \frac {\pi_ {\theta} ^ {i , t}}{\pi_ {\theta_ {\mathrm {o l d}}} ^ {i , t}} \hat {A} _ {i, t}, \operatorname {c l i p} \left(\frac {\pi_ {\theta} ^ {i , t}}{\pi_ {\theta_ {\mathrm {o l d}}} ^ {i , t}}, 1 - \epsilon , 1 + \epsilon\right) \hat {A} _ {i, t} \right] - \beta \mathbb {D} _ {\mathrm {K L}} [ \pi_ {\theta} \| \pi_ {\mathrm {r e f}} ] \right\}, +$$ + +$$ +\mathbb {D} _ {\mathrm {K L}} \left[ \pi_ {\theta} \| \pi_ {\text {r e f}} \right] = \frac {\pi_ {\text {r e f}} \left(o _ {i , t} | q , o _ {i , < t}\right)}{\pi_ {\theta} \left(o _ {i , t} | q , o _ {i , < t}\right)} - \log \frac {\pi_ {\text {r e f}} \left(o _ {i , t} | q , o _ {i , < t}\right)}{\pi_ {\theta} \left(o _ {i , t} | q , o _ {i , < t}\right)} - 1, \tag {1} +$$ + +where $\epsilon$ and $\beta$ are hyper-parameters, and $\hat{A}_{i,t}$ is the advantage, computed using a group of rewards $\{r_1,r_2,\dots ,r_G\}$ corresponding to the outputs within each group. Refer to [12, 55] for more details. + +# 4 Perception-R1 + +In a nutshell, our Perception-R1 applies the rule-based RL algorithm GRPO [55] to the post-training stage of MLLM and optimizes the reward modeling to support perception policy learning. Figure 1 illustrates the idea, more approach and implementation details introduced next. + +# 4.1 Rule-based Reward Modeling + +The reward function serves as the principal training signal in reinforcement learning (RL), directing the optimization process. Existing LLM methods [12, 57, 24] basically apply a highly resilient, rule-based reward system consisting of only two reward types: Format Reward and Answer Reward. + +Format Reward. 
In existing LLM and MLLM, the output format is comprised of two essential components: the final output format and the intermediate reasoning process format. The reward for + +![](images/cb9aa06cba656d51b4608fdf2dbc637b5aeafe0c9ddd151a6164daf19bf44f99.jpg) +Figure 1: Illustration of Perception-R1 framework. Following DeepSeek-R1 [12], we prompt MLLM model to generate several rollout responses and apply GRPO [55] during post-training stage. + +the final output is defined in accordance with specific task requirements and is typically encapsulated within `` tags, whereas the reward for the intermediate reasoning process generally mandates that the reasoning steps be enclosed within `` tags. Formally, + +$$ +S _ {\text {f o r m a t}} = \left\{ \begin{array}{l l} 1, & \text {i f f o r m a t i s c o r r e c t} \\ - 1, & \text {i f f o r m a t i s i n c o r r e c t} \end{array} \right. \tag {2} +$$ + +In Perception-R1, we follow this setting. A subtle difference emerges that visual perception task frequently requires the output of object coordinates, e.g., bounding box, lines, or points. Consequently, the output format must be strictly constrained to the $[x1, y1, x2, y2]$ structure. + +Answer Reward. The Answer Reward pertains to the correctness of model-generated responses, serving as a central consideration in reward design. Typically, outputs from language models are abstract and semantically rich, requiring validation through external mechanisms such as code-based ADE [12] or mathematical answer verification [55]. In contrast, visual perception tasks benefit from clearly defined physical ground truths, which simplify the development of a robust reward function. + +Perception-R1 diverges from LLM approaches by anchoring the reward mechanism in visual discrimination. This departure is pivotal, as it replaces the often implicit and subjective feedback mechanisms typical of language models with an explicit, quantifiable metric. 
Formally, discriminative reward $r_i$ can be represented as: + +$$ +r _ {i} = \Phi \left(o _ {i}, z\right), \tag {3} +$$ + +where $\Phi(\cdot)$ indicates the discriminative function, for example, IoU for bounding box and euclidean distance for point. By leveraging visual discrimination, we provide the model with a clear and objective feedback signal, ensuring the model's policy update with precise measured margin. + +# 4.2 Multi-Subject Reward Matching + +In natural environments, physical objects rarely appear in isolation and instead frequently co-occur in groups. This inherent complexity gives rise to a challenge we define as reward matching, which entails aligning the model's output with the corresponding ground truth before reward computation. Specifically, when prompting the model to predict the attributes of multiple subjects within an image, e.g., points and bounding box, it is necessary to determine the appropriate ground truth reference for each subject to ensure accurate reward assignment. + +Formally, let $y = \{y_{i}\}_{i=1}^{N}$ denote the set of predicted attributes for $N$ subjects, and let $z = \{z_{j}\}_{j=1}^{M}$ represent the corresponding ground truth attributes. We model the reward matching problem as a bipartite graph matching task, where one set of nodes corresponds to predictions and the other to ground truths. The edge weight between a prediction $y_{i}$ and a ground truth $t_{j}$ is determined by the + +reward function $\Phi(y_i, z_j)$ defined in Eq. 3, which measures their similarity or compatibility. The objective is to find the optimal assignment that maximizes the total reward: + +$$ +\hat {\sigma} = \underset {\sigma \in \Omega_ {N}} {\arg \max } \sum_ {i = 1} ^ {N} \Phi (y _ {i}, z _ {\sigma (i)}), \tag {4} +$$ + +where $\Omega_N$ is the set of all valid assignments between predictions and ground truths. 
To solve this optimization problem efficiently, we employ the Hungarian algorithm [27], a well-established method for bipartite graph matching that guarantees the optimal pairing by maximizing the overall reward (or equivalently, minimizing the cost). This ensures that each predicted attribute is accurately matched with its corresponding ground truth, thereby optimizing the reward computation process. + +After the optimal reward assignment is determined, we calculate the answer reward by aggregating the individual rewards for each subject. Mathematically, the overall reward score is defined as: + +$$ +S _ {\text {a n s w e r}} = \frac {1}{N} \sum_ {i = 1} ^ {N} \Phi (y _ {i}, z _ {\hat {\sigma} (i)}), \tag {5} +$$ + +$$ +S _ {\text {t o t a l}} = S _ {\text {f o r m a t}} + S _ {\text {a n s w e r}} +$$ + +where $\hat{\sigma}$ is the optimal assignment obtained via the Hungarian algorithm. In Perception-R1, we primarily use reward matching for visual counting and object detection tasks, as these involve multiple objects. + +# 4.3 Perception-R1 Configuration + +Model Setting. Our model implementation follows Qwen2-VL [61]. We mainly use the Qwen2-VL-Instruct-2B as the baseline model. We also utilize Qwen2.5-VL-3B-Instruct [3] for training object detection tasks, due to its specialized optimization for localizing bounding boxes. The input image resolution for Qwen2-VL is dynamic cooperated with 2D-RoPE [56]. + +Task and Data Setting. Given that Perception-R1 is primarily oriented towards pure visual and visual-language tasks, we select several mainstream and representative downstream tasks for perception policy learning, specifically including visual grounding, e.g., refCOCO [71] / + [71] / g [40], OCR, i.e., PageOCR [34], visual counting, i.e., Pixmo-Count [13], and object detection, i.e., COCO2017 [32]. For each task, a subset $(5k\sim 10k)$ of samples are respectively extracted as base data for individual post-training. More details are in appendix A.1. 
+ +Training Setting. We focus on the RL-based post-training stage of MLLM. All the selected base models have already undergone pre-training and SFT stage. During RL stage, the initial learning rate is set as $1e - 6$ with 8 rollouts by default and a batch size of 1. The following are some important hyper-parameters during post-training. Prompts detailed settings are in the appendix A.1. + +
Gradient AccumulationRollout GKL CoefficientMax Response LenTemperature
280.0420481.0
+ +Reward Setting. We tailor distinct discriminative rewards for various visual perception tasks. For the grounding task, the reward is based on the Intersection over Union (IoU) between the predicted output and the ground truth. In the counting task, we adopt a paradigm similar to Qwen2.5-VL, which first detects points and then counts them. Here, the reward is derived from the Euclidean distance computed during reward matching. For OCR, the edit distance serves as the primary reward metric. Lastly, in object detection, we combine multiple rewards: an object number reward based on the F1 score, a location reward using IoU, and a binary classification reward with a missing penalty. + +Sampling Setting. Following Kimi-1.5 [57], we adopt a curriculum sampling strategy that begins with easier data and gradually transitions to more challenging examples. Specifically, for the object detection task, we first conduct offline training on the COCO dataset to compute reward values. Based on the selected rewards, i.e., number reward, we partition the dataset accordingly. As training advances, we progressively replace the data with more difficult samples (i.e., those associated with lower rewards) while concurrently increasing the rollout to broaden the model's exploration space. + +
methodsizeRefCOCO
val@50testA@50testB@50val@75testA@75testB@75val@95testA@95testB@95valAvgtestAAvg
MDETR [25]-87.590.482.6--------
OFA [62]-88.490.683.3--------
LLaVA-1.5 [35]7B49.154.943.310.713.66.90.40.30.320.122.9
LLaVA-NeXT [36]7B82.588.474.045.754.835.61.92.60.743.448.6
LLaVA-OV [28]7B73.082.363.524.229.615.90.50.50.532.637.5
Qwen2-VL [61]2B86.889.682.077.280.670.133.035.726.965.768.6
Perception-R12B89.191.484.579.583.672.435.038.528.867.971.2
RefCOCO+
methodsizeval@50testA@50testB@50val@75testA@75testB@75val@95testA@95testB@95valAvgtestAAvg
MDETR [25]-81.185.572.9--------
OFA [62]-81.387.174.2--------
LLaVA-1.5 [35]7B42.449.736.49.812.46.40.50.50.217.620.8
LLaVA-NeXT [36]7B74.584.064.741.551.830.01.92.71.039.346.2
LLaVA-OV [28]7B65.879.057.223.628.815.30.60.60.430.036.1
Qwen2-VL [61]2B77.182.570.168.773.860.029.432.323.058.462.9
Perception-R12B81.786.874.373.679.364.232.636.926.762.667.7
RefCOCOg
methodsizeval@50test@50val@75test@75val@95test@95valAvgtestAvg
MDETR [25]-83.383.3------
OFA [62]-82.282.3------
LLaVA-1.5 [35]7B43.245.18.59.30.30.317.318.2
LLaVA-NeXT [36]7B77.577.140.739.91.81.740.039.6
LLaVA-OV [28]7B70.870.823.323.60.60.731.631.7
Qwen2-VL [61]2B83.383.172.773.028.927.961.661.3
Perception-R12B85.785.475.776.032.133.164.564.8
+ +Table 1: Visual grounding benchmark evaluation. To comprehensively assess the model's grounding capability, we select referring expression comprehension (REC) benchmark, i.e., RefCOCO [71], RefCOCO+[71], and RefCOCOg[40] for evaluation. The expert model is denoted in gray. + +
sizeEdit Distance ↓F1-score ↑Precision ↑Recall ↑BLEU ↑METEOR ↑
enzhenzhenzhenzhenzhenzh
Nougat [4]250M25.5-74.5-72.0-80.9-66.5-76.1-
DocOwl1.5 [23]7B25.8-86.2-83.5-96.2-78.8-85.8-
GOT [65]580M3.53.897.298.097.198.297.397.894.787.895.893.9
Qwen2-VL [61]2B8.010.094.493.096.996.193.090.590.978.094.187.2
LLaVA-NeXT [36]7B43.0-64.7-57.3-88.1-47.8-58.2-
Perception-R12B3.59.098.294.498.696.397.892.796.774.698.188.9
+ +Table 2: PageOCR evaluation, compared with various strong expert and general models. "en" means English and "zh" means Chinese. + +# 5 Experiments + +The experimental section evaluates Perception-R1's performance on visual perception tasks (§ 5.1), followed by analytical experiments exploring reinforcement learning (RL)'s role in perception policy learning (§ 5.2). Finally, it discusses the interplay between visual perception and RL, along with key insights for perception policy learning (§ 5.3). + +# 5.1 Performance Landscape in Perception Tasks + +We evaluate Perception-R1 on mainstream perception tasks: visual grounding, counting, OCR, and object detection. Experiments use the datasets described in § 4.3 and benchmarks for image understanding. Results are in Tables 1-4. See Appendix A.2 for details. + +Visual Grounding is a task that involves localizing visual objects based on linguistic descriptions. Specifically, given a language prompt, the model is required to output the spatial coordinates of the subject (typically a single entity) described in the prompt. As shown in Table 1, we evaluate + +
methodsizeVisual Counting
PixmovalPixmotest
LLaVA-1.5 [35]7B33.331.0
LLaVA-1.6 [58]7B32.731.9
LLaVA-OV [28]7B55.853.7
Qwen2-VL [61]2B60.250.5
Perception-R12B78.175.6
+ +(a) Visual counting evaluation on Pixmo-Count [13] +val set and test set. + +
methodsizeepochObject Detection
AP\( AP_{50} \)\( AP_{75} \)
YOLOv3 [51]-27327.949.228.3
Faster-RCNN [52]-1235.655.737.9
DETR [6]41M50042.062.444.2
Qwen2.5-VL [3]3B116.123.716.7
Perception-R1†3B131.946.733.4
+ +(b) Object detection evaluation on COCO2017 [32] validation set. + +Table 3: Mainstream visual tasks evaluation including (a) visual object counting and (b) challenging general object detection. Notably, the results of expert model in (b) are copied from MMDetection [7]. $\dagger$ means Perception-R1 for object detection is build based on Qwen2.5-VL-3B-Instruct [3]. + +
llmMMBenchMMVetMMStarScienceQASeedBenchMMELLaVA-BenchAI2D
AvgAvgAvgAvgAvgAvgAvgCognitionPerceptionAvgAvg
LLaVA1.5 [35]Vicuna1.5-7B62.832.832.665.460.1302.11338.352.651.9
LLaVA-NeXT [36]Vicuna1.5-7B66.037.937.768.269.1195.71419.552.767.4
Qwen2-VL [61]Qwen2-2B71.945.646.374.072.7418.51471.146.571.6
Perception-R1Qwen2-2B71.848.945.773.473.0430.01473.958.271.8
+ +Table 4: General image understanding and reasoning evaluation, compared with various baselines. We select 8 mainstream multimodal benchmarks, i.e., MMBench [38], MMVet [72], MMStar [9], ScienceQA [53], SeedBench [18], MME [16], LLaVA-Bench [37], and ai2D [26] for the comprehensive understanding. We use the model after RL training in the counting tasks for the eval. + +Perception-R1 on three mainstream benchmarks, refCOCO / + / g, and report Acc@0.5, Acc@0.75, and Acc@0.95 to comprehensively assess its visual grounding capability. We surprisingly find that several SoTA MLLMs exhibit poor performance on the more challenging Acc@0.95 metric, with scores even below 1%. In contrast, Perception-R1 achieves a stable performance of over 30% on this metric. This observation suggests that the community should prioritize reporting more discriminative results in future evaluations. The experimental results demonstrate that Perception-R1 exhibits strong competitiveness compared to both specialized and general-purpose models. + +Optical Character Recognition (OCR) represents a critical task in visual perception due to its substantial practical value. Current methodologies predominantly adopt either expert models or fine-tuned generalist models for OCR. Perception-R1 pioneers the utilization of RL to further unlock the OCR capabilities of MLLM. As shown in Table 2, our proposed Perception-R1 achieves SoTA performance on the highly challenging OCR benchmark, i.e., PageOCR [34], demonstrating significant superiority over existing expert models, e.g., GOT (98.1 vs. 97.2 F1-score) and robust generalist models, e.g., LLaVA-NeXT (98.1 vs. 64.7 F1-score). Notably, Perception-R1 does not use the Chinese OCR data for training so it is a zero-shot performance for Chinese metric. This breakthrough substantiates the formidable potential of RL applications in OCR tasks, establishing new frontiers for enhancing textual understanding and recognition in complex visual environments. 
+ +Visual Counting, as a fundamental vision task, necessitates models to accurately quantify category-specific instances within images, requiring robust visual logic to identify and enumerate targets through structured recognition patterns. In Perception-R1, we adopt a detect-then-count paradigm that reformulates the counting problem into a point detection process. As shown in Table 3a, Perception-R1 achieves remarkable counting performance, surpassing the current strong baselines by a substantial margin (17.9% improvement compared to Qwen2-VL in Pixmo val set). This advancement substantiates that RL effectively stimulates models to explore intrinsic visual logic mechanisms (Although counting yields deterministic results, the sequence of counting can exhibit distinct patterns.), thereby enhancing their capacity to resolve complex vision tasks. + +General Object Detection, widely regarded as the crown jewel of computer vision tasks, has long been considered one of the most challenging problems in visual perception. As a pioneering endeavor to integrate RL into object detection, Perception-R1 achieves a groundbreaking milestone, serving as the first pure MLLM to surpass the $30+$ AP threshold, i.e., 31.9 AP in Table 3b, on the COCO 2017 val set, matching or even exceeding the performance of specialized expert models. This achievement underscores rule-based RL's immense potential in addressing complex vision tasks requiring sophisticated visual-logic integration. + +
caseVisual GroundingOCR PageOCRVisual CountingDetection COCO2017
RefCOCORefCOCO+RefCOCOgPixmovalPixmotest
Perception-R189.181.785.798.478.175.631.9
w/o reward matching----77.175.423.5
w/o RL86.877.183.394.460.250.516.1
w/ thinking75.167.971.377.374.972.825.7
w/o thinking89.181.785.795.778.175.628.1
RL only89.181.785.795.778.175.631.9
SFT only88.280.784.695.358.059.925.9
SFT+RL88.480.785.197.377.175.430.8
+ +Table 5: Ablation Study of Perception-R1. We perform ablation studies to investigate key properties of Perception-R1 across a range of visual perception tasks. Specifically, we report the Acc@0.5 for RefCOCO / + / g val set, the F1-score for PageOCR, the average scores for Pixmo-Count, and the AP metric for COCO2017 val set. w/o means without. Notably, there is no reward matching applied to visual grounding and OCR tasks, as these tasks do not involve the multi-subject reward. + +
reward functionCOCO2017
AP\( AP_{50} \)\( AP_{75} \)
format reward---
format reward + location reward (IoU)18.825.320.1
format reward + location reward (IoU) + cls reward20.227.321.4
format reward + location reward (IoU) + cls reward + recall reward (F1)27.642.028.7
format reward + location reward (IoU) + cls reward + recall reward (F1) + missing reward28.142.029.6
+ +Table 6: Reward design analysis of Perception-R1. pls reward indicates binary classification reward and missing reward is a penalty to penalize missed detections. To facilitate rapid experimentation, we randomly sampled 10k data from COCO2017 train set for this experiment. + +General Visual Comprehension extends beyond pure perceptual tasks, and we evaluate Perception-R1 on multiple multimodal benchmarks. As shown in Table 4, we observe an intriguing phenomenon that models trained with RL for vision-specific tasks, e.g., counting task, exhibit concurrent performance gains in generic comprehension benchmarks. We attribute this cross-task enhancement to the perception policy learning, which drives the model to discover superior image interpretation patterns. + +# 5.2 Ablation Study of Perception-R1 + +In this section, we aim to conduct a comprehensive ablation study to systematically investigate the contributions of critical components within Perception-R1. Experimental results are shown in Table 5. From the experimental results, we can derive three principal empirical findings: + +Reward matching enhances the explorability of multi-subject visual perception. As evidenced by the comparative results between row 1 and 2 in Table 5, replacing the bipartite matching with sequential matching leads to substantial performance degradation in both visual counting and object detection task. This suggests that sequential matching constrains the RL exploration space. On the contrast, the bipartite matching mechanism provides more possibility in reward assignment, enabling the model to explore optimal visual perception patterns. + +Explicit thinking processes prove non-essential for contemporary visual perception. Comparative analysis of row 3 and 4 reveals consistent performance degradation across all four evaluated perception tasks when incorporating an explicit thinking process during both training and inference phases. 
A similar phenomenon also emerges in image classification tasks [30]. We posit that this phenomenon arises because current visual perception tasks are more oriented toward visual logic rather than semantic logic. This shift implies that explicit language-centered reasoning processes are unnecessary, as models tend to focus more on learning implicit visual patterns. + +Perceptual perplexity determines RL superiority over SFT. We compare the different combinations of post-training methods, i.e., SFT, RL, and $\mathrm{SFT + RL}$ , across four perception tasks, as shown in rows 6, 7, and 8 of Table 5. In tasks with high perceptual perplexity, such as counting and detection (multiple objects and categories), RL demonstrates superior performance enhancement compared to SFT or even $\mathrm{SFT + RL}$ . Conversely, in low-perplexity tasks such as grounding and OCR, RL underperforms relative to SFT or $\mathrm{SFT + RL}$ . This indicates that high perceptual perplexity is a significant factor influencing the effectiveness of RL. It suggests that RL techniques should be applied to tasks with greater perceptual perplexity, where the exploration space for perception policy is larger. + +![](images/3053b8e6241b40acdbecf83ae363a7f83ebec9ed84048e20cc6e311d938803bd.jpg) +(a) Grounding reward + +![](images/f92468a49b9c1c8ca4893b7a61fdc713f5cfb6c948a614ddf22f1ec900d85de3.jpg) +(b) Grounding performance + +![](images/aee8ba28fe17acb09471504630258562d6034bed74d6104b513bf8b4ff85240c.jpg) +(c) Counting reward +Figure 2: Scalability analysis of Perception-R1. We select two primary tasks: grounding and counting. We visualize the training reward curves under varying numbers of rollouts and evaluate the final performance of each task. All experiments are conducted with $5k$ sampled data. The default rollout number setting $(1\times)$ is 8. 
+ +![](images/346f636339e1da4901dd34bcfb41c75b7043da12f645cc487144f19a5e0a4dec.jpg) +(d) Counting performance + +# 5.3 More In-depth Analysis + +In this section, we explore several key properties of Perception-R1 to further enhance our understanding of Perception Policy Learning with RL. + +Analysis of reward design for perception policy learning. We introduced the details of reward function of Perception-R1 in § 4.3. In this part, we examine the influence of these reward functions on perception policy learning. Specifically, using object detection as a case study, we incrementally integrate the designed answer reward into the format reward, as illustrated in Table 6. The results indicate that the progressive introduction of refined reward functions leads to consistent improvements in detection performance, ultimately exceeding the performance of expert models. This underscores the critical role of reward design in perception policy learning. Furthermore, it identifies a promising avenue for future research: the development of more refined and task-specific reward functions to enhance perception policy learning. + +Analysis of scaling up rollout for perception policy learning. The scalability of RL is a key concern of existing LLM post-training. In this part, we analyze the scalability of Perception-R1, focusing specifically on scaling up the number of rollouts. As shown in Figure 2, we conduct rollout-scaling experiments in two tasks: visual grounding and visual counting. The results indicate that increasing rollout count enhances reward optimization and final performance. This demonstrates Perception-R1's strong scaling properties and underscores the critical role of rollout quantity in scaling perception policies. By generating sufficient rollouts, the model broadens its exploration space, increasing the diversity of candidate solutions for reward evaluation. This expansion accelerates convergence to optimal visual perception patterns. 
+ +# 6 Limitation and Conclusion + +"What can RL bring to MLLM?" is a public question since the propose of DeepSeek-R1. Several latest works attempt to apply RL from the perspective of language-centric visual reasoning [39, 15, 41]. However, in this paper, we take a different pathway and argue that perception is a crucial prerequisite for visual reasoning. Only by fully unlocking the perception patterns of MLLMs can the models possess the ability to reason about complex visual tasks. Nevertheless, we regrettably find that many current perception tasks are overly simplistic, which limits the exploration space for RL. This, in turn, restricts the possibility of MLLMs achieving a perceptual "Aha moment" through thinking process. Finding more appropriate perception tasks, aka., meta task, may be the key to addressing this issue. + +In a summary, this work takes a pioneering step in exploring the potential of rule-based RL in MLLM post-training for perception policy learning. Through extensive experimental analysis, we establish several valuable cognition about perception policy learning with RL. Driven by these findings, we build Perception-R1, a simple, effective, and scalable RL framework for efficient perception policy learning. Perception-R1 sets new SoTAs across multiple visual perception tasks, particularly in object detection tasks. By introducing a novel paradigm, it achieves and even surpasses the performance of expert models, thereby demonstrating the significant potential of perception policy learning. + +# References + +[1] Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. +[2] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 
+[3] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. +[4] Lukas Blecher, Guillem Cucurull, Thomas Scialom, and Robert Stojnic. Nougat: Neural optical understanding for academic documents. arXiv preprint arXiv:2308.13418, 2023. +[5] Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski, Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, et al. Rt-2: Vision-language-action models transfer web knowledge to robotic control. arXiv preprint arXiv:2307.15818, 2023. +[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. +[7] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, Zheng Zhang, Dazhi Cheng, Chenchen Zhu, Tianheng Cheng, Qijie Zhao, Buyu Li, Xin Lu, Rui Zhu, Yue Wu, Jifeng Dai, Jingdong Wang, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. MMDetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. +[8] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. +[9] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. +[10] Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025. 
+[11] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale N Fung, and Steven Hoi. Instructclip: Towards general-purpose vision-language models with instruction tuning. Advances in Neural Information Processing Systems, 36, 2024. +[12] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun + +T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. 
X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yaohui Li, Yi Zheng, Yuchen Zhu, Yunxian Ma, Ying Tang, Yukun Zha, Yuting Yan, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhengyan Zhang, Zhewen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, Zilin Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. +[13] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. +[14] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, Xiangwen Kong, Xiangyu Zhang, Kaisheng Ma, and Li Yi. DreamLLM: Synergistic multimodal comprehension and creation. In The Twelfth International Conference on Learning Representations, 2024. +[15] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. +[16] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Zhenyu Qiu, Wei Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. +[17] Zitian Gao, Boye Niu, Xuzheng He, Haotian Xu, Hongzhang Liu, Aiwei Liu, Xuming Hu, and Lijie Wen. 
Interpretable contrastive monte carlo tree search reasoning, 2024. +[18] Yuying Ge, Sijie Zhao, Ziyun Zeng, Yixiao Ge, Chen Li, Xintao Wang, and Ying Shan. Making llama see and draw with seed tokenizer. arXiv preprint arXiv:2310.01218, 2023. +[19] GPT-4o. Hello gpt-4o, 2024. +[20] Kaiming He, Georgia Gkioxari, Piotr Dólár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. +[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. +[22] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, et al. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14281-14290, 2024. +[23] Anwen Hu, Haiyang Xu, Jiabo Ye, Ming Yan, Liang Zhang, Bo Zhang, Chen Li, Ji Zhang, Qin Jin, Fei Huang, et al. mplug-docowl 1.5: Unified structure learning forocr-free document understanding. arXiv preprint arXiv:2403.12895, 2024. +[24] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, 2025. + +[25] Aishwarya Kamath, Mannat Singh, Yann LeCun, Gabriel Synnaeve, Ishan Misra, and Nicolas Carion. Mdetr-modulated detection for end-to-end multi-modal understanding. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1780-1790, 2021. +[26] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part IV 14, pages 235-251. 
Springer, 2016. +[27] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97, 1955. +[28] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. +[29] Jinyang Li, En Yu, Sijia Chen, and Wenbing Tao. Ovtr: End-to-end open-vocabulary multiple object tracking with transformer. arXiv preprint arXiv:2503.10616, 2025. +[30] Ming Li, Shitian Zhao, Jike Zhong, Yuxiang Lai, and Kaipeng Zhang. Cls-rl: Image classification with rule-based reinforcement learning. arXiv preprint arXiv:2503.16188, 2025. +[31] Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023. +[32] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. +[33] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. +[34] Chenglong Liu, Haoran Wei, Jinyue Chen, Lingyu Kong, Zheng Ge, Zining Zhu, Liang Zhao, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Focus anywhere for fine-grained multi-page document understanding. arXiv preprint arXiv:2405.14295, 2024. +[35] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. +[36] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. 
Llava-last: Improved reasoning,OCR, and world knowledge, January 2024. +[37] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. +[38] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023. +[39] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. +[40] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11–20, 2016. +[41] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. + +[42] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025. +[43] OpenAI. Chatgpt. https://openai.com/blog/chatgpt, 2022. +[44] OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. +[45] OpenAI. Learning to reason with llms, September 2024. +[46] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022. 
+[47] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35:27730-27744, 2022. +[48] Yuang Peng, Yuxin Cui, Haomiao Tang, Zekun Qi, Runpei Dong, Jing Bai, Chunrui Han, Zheng Ge, Xiangyu Zhang, and Shu-Tao Xia. Dreambench++: A human-aligned benchmark for personalized image generation. arXiv preprint arXiv:2406.16855, 2024. +[49] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. +[50] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36, 2024. +[51] Joseph Redmon and Ali Farhadi. Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767, 2018. +[52] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE transactions on pattern analysis and machine intelligence, 39(6):1137-1149, 2016. +[53] Tanik Saikh, Tirthankar Ghosal, Amish Mittal, Asif Ekbal, and Pushpak Bhattacharyya. Scienceqa: A novel resource for question answering on scholarly articles. International Journal on Digital Libraries, 23(3):289-301, 2022. +[54] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. +[55] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 
Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. +[56] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. +[57] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. +[58] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. +[59] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. + +[60] Fei Wang, Wenxuan Zhou, James Y Huang, Nan Xu, Sheng Zhang, Hoifung Poon, and Muhao Chen. mdpo: Conditional preference optimization for multimodal large language models. arXiv preprint arXiv:2406.11839, 2024. +[61] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. +[62] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International conference on machine learning, pages 23318-23340. PMLR, 2022. +[63] Haoran Wei, Lingyu Kong, Jinyue Chen, Liang Zhao, Zheng Ge, Jinrong Yang, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. 
Vary: Scaling up the vision vocabulary for large vision-language model. In European Conference on Computer Vision, pages 408-424. Springer, 2024. +[64] Haoran Wei, Lingyu Kong, Jinyue Chen, Liang Zhao, Zheng Ge, En Yu, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Small language model meets with reinforced vision vocabulary. arXiv preprint arXiv:2401.12503, 2024. +[65] Haoran Wei, Chenglong Liu, Jinyue Chen, Jia Wang, Lingyu Kong, Yanming Xu, Zheng Ge, Liang Zhao, Jianjian Sun, Yuang Peng, et al. GeneralOCR theory: TowardsOCR-2.0 via a unified end-to-end model. arXiv preprint arXiv:2409.01704, 2024. +[66] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022. +[67] Huajian Xin, Z. Z. Ren, Junxiao Song, Zhihong Shao, Wanjia Zhao, Haocheng Wang, Bo Liu, Liyue Zhang, Xuan Lu, Qiushi Du, Wenjun Gao, Qihao Zhu, Dejian Yang, Zhibin Gou, Z. F. Wu, Fuli Luo, and Chong Ruan. Deepseek-prover-v1.5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search, 2024. +[68] En Yu, Kangheng Lin, Liang Zhao, Yana Wei, Zining Zhu, Haoran Wei, Jianjian Sun, Zheng Ge, Xiangyu Zhang, Jingyu Wang, et al. Unhackable temporal rewarding for scalable video mllms. arXiv preprint arXiv:2502.12081, 2025. +[69] En Yu, Tiancai Wang, Zhuoling Li, Yang Zhang, Xiangyu Zhang, and Wenbing Tao. Motrv3: Releasefetch supervision for end-to-end multi-object tracking. arXiv preprint arXiv:2305.14298, 2023. +[70] En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589, 2023. +[71] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. 
In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 69-85. Springer, 2016. +[72] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. +[73] Liang Zhao, En Yu, Zheng Ge, Jinrong Yang, Haoran Wei, Hongyu Zhou, Jianjian Sun, Huang Peng, Runpei Dong, Chunrui Han, et al. Chatspot: Bootstrapping multimodal llms via precise referring instruction tuning. arXiv preprint arXiv:2307.09474, 2023. +[74] Zining Zhu, Liang Zhao, Kangheng Lin, Jinze Yang, En Yu, Chenglong Liu, Haoran Wei, Jianjian Sun, Zheng Ge, and Xiangyu Zhang. Perpo: Perceptual preference optimization via discriminative rewarding. arXiv preprint arXiv:2502.04371, 2025. + +# A Appendix + +In this appendix, we provide additional details about Perception-R1, which are omitted due to the 9-page limit of the main paper. Specifically, Section A.1 elaborates on the detailed dataset and training settings. Section A.2 presents more experimental results. + +# A.1 Additional Details about Experimental Setting + +More detailed dataset information of Perception-R1. In Section 4.3, we introduced what data was used for RL post-training of Perception-R1 on which tasks. In this part, we will provide more detailed information about the datasets, as shown in Table 7. + +
tasksdatasetsOriginalUsedRatio
visual groundingRefCOCO / RefCOCO+ / RefCOCOg320k5k1.56%
OCRPageOCR50k5k10%
visual countingPixMo-Count1.9M10k0.5%
object detectionCOCO2017110k110k100%
overall-2.38M130k-
+ +More detailed training setting information of Perception-R1. Section 4.3 elaborates on several key parameters of Perception-R1. In this part, we further demonstrate the diverse prompts employed for distinct perception tasks, as shown in Table 8. + +Table 7: Training dataset statistics. Notably, we do not mix the data from different perception tasks for joint training because the rewards for different tasks vary. + +
taskssystem promptuser prompt
visual groundingQwen2-VLOutput the bounding box of the {question} in the image.
OCRQwen2-VLOCR this image.
visual countingQwen2-VLOutput all the bounding boxes of the {label}
object detectionQwen2.5-VLPlease output bbox coordinates and names of {90 categories of COCO}.
+ +Table 8: Prompts of Perception-R1. The system prompt of Perception-R1 follows Qwen2-VL [61] and Qwen2.5-VL [3]. + +# A.2 Additional Experimental Results + +In this section, we provide more qualitative analysis of Perception-R1 on multiple visual perception tasks. The selected cases are shown in Figure 3-6. + +![](images/0462c8ec286d5c7d395e7cbc94895fcdc812f7bea13f6f8a53be3af0b8a4702d.jpg) +Figure 3: Demo case of Percpetion-R1 on visual counting task. + +![](images/66e3dc80cff0f2751ef3431afc35a24985f5ee06c3e675f05e4e3abbf56e3f18.jpg) +Input: + +![](images/17ad0191a60a3dd9a54083a097320ffdc99ee524bda739999b0c69e290bafa00.jpg) +Figure 4: Demo case of Percpetion-R1 on OCR task. + +# Riding Dirty + +A muddy mix of road +The wind is a bit +Cyclocross doubles the +thrill of both sports. Here's +the gear to get you started. + +by BERNSTEIN 100VY + +Cyclosis is a cool term. It means to drop-hotelize bikes with little or no need to walk. It is a course that often includes a variety of exercises, such as as well as obstacles that force you to get your legs on the ground. A bike over your shoulder. "All you need is a bike and have a good attitude and confidence in your ability to walk," says Stu Thorne, founder and CEO of the professional cyclosis team. + +BEST PCHENTY LEVEL + +What you canificn with extra weight you make up for with a highly capable automobile frame that you'll want to keep rolling between trips. A carbon-bond tire helps eat and move the vehicle. The car also has a high quality底盘, an excellent底盘, and a 100%底盘 driven power through any grade. And TBP cable disc brakes perform well even when the vehicle is on the road. The steering system is also very useful to consider if you're looking for a bike that can travel all way. + +![](images/89a957bd1bb76655d3273172dac178614e0fa382484fd578c9fe8b83a2e59d2c.jpg) + +![](images/579e0f2a185ee16ddbe4e2166f6260c9afac14b0b8d60d1d2a4c35e5ee16becc.jpg) + +A premium neck bike, this should be your next choice. 
The Carbone Factory offers a 2000 inch, 16-in. front bottom bracket and relatively easy to install. It's also suitable for hard, solid, hard or soft through cracks. This is the best way to get one of these items. They can be run tubes to better make it easier to use. They mean they spring freely when called on to do so. They are lightweight and fast- and something you won't have to deal with in special area $3,000 + +![](images/228803a2141e32fd5ffd82f71c5eb34605ec16fae536c99ed74406455884046c.jpg) + +Craft Shield Glove +Glove +gloves +gloves from sailor +gloves from sailor +gloves from sea +gloves from sea +gloves from sea +gloves from sea +gloves from sea +gloves from sea + +![](images/7c25e4962ffe267435cfb24669bd2212ff8b74cf4982030382e10e32dcb6c53a.jpg) + +Darn Tough Microer Crews can be a great way to complete your cyclical routines you know. The best way to get the best, fight, strong and cool are to do so with the "power of the soul." + +![](images/0e15e41f95fe19e01d7b93c2eae7c047b8338378b3a8e6dcb098bdad66bfe059.jpg) + +Park Tool Rapho Brush Set Leng W 10mm 2. This tool usually dirts the skin and acts as a light source for key components. This kit the teeth are used to clean your bony look cheek off. You would need to purchase part.com e600 regal.co.uk + +![](images/cf3f379731aab4c9a3ee26872f83496ba2430ebac37aa5b35a001d16ebb1bfbc.jpg) + +Rapha Arm and Leg Warmers +A has a lot of Lycia for the warm season. +The warm weather up, etc. +The warm air is coming from the warm air. +Lycia from E70 + +![](images/01d85e92cf288989a366cb6be307e072ac35ef3cfdd208c1716ed73746c60ccc.jpg) + +Topeak SmartGaUGE D2 1000000000000000000000000000000000000000000000000000000000000000 + +MEN'S JOURNAL 94 NOVEMBER 2016 + +# Question: + +OCR this image. + +# Perception-R1: + +G E A R L A B\nRiding Dirty\n\nA muddy mix of road- and mountain-bike racing, cyclocross doubles the thrill of both sports. 
Here's the gear to get you started.\n\nBryne Bredy\n\nCyclocross is as cool as it looks: Racers pedal drop-handlebikes with trail-ready tires through a course that often includes pavement, dirt, mud, and grass, as well as obstacles that force you to dismount and sling your bike over your shoulder. "All you really need is to be tough and have a good attitude and a desire to suffer a little bit," says Stu Thorne, founder and director of the Cannondale professional cyclocross team. The right gear helps, too.\nBest for Entry Level\n\nCAADX 105\n\nWhat you sacrifice with extra weight you make up for with a highly capable aluminum frame that'll keep you going between races. A carbon-blade fork helps eat up some of the pounding the courses can dish out. The dependable 2x11 speed Shimano 105 drivetrain powers through any grade, and TRP cable disc brakes perform well even in mud. Meanwhile, the 35cc tires and stable geometry make the CAADX a superfun commuter if you're looking for a bike that can multitask\n\nCannondale.com $\mathbb{S}1,970\backslash \mathbb{n}\backslash \mathbb{n}$ Best for Experienced Cyclists\n\nCruX Elite X1\n\nIf you already log long miles on a premium road bike, this should be your new cross rig. The carbon frame is as light as it is.\n\nLow bottom bracket and relatively slack head angle, which make it stable, helped us sail through rocks and roots and corner quickly. The tires can be run tubeless to better resist flats, and extra clearance means they spin freely when caked with mud. The CruX Elite is playful and fast — and something you won't outgrow as you collect medals.\n\nGlove\n\nThe cross season typically runs from September through February, so you'll need hearty gloves like these, with a fleece lining and a waterproof base, for warmth on wet race days. craftsports.us $\mathbb{S}78\backslash \mathbb{n}\backslash \mathbb{N}$ Darn Tough Micro Crew\n\nUnlike other bike races, cyclocross requires you to be on foot at times. 
So light, strong socks are key. These aren't likely to wear out, but Darn Tough will replace them if they do. darntough.com $\mathbb{S}18\backslash \mathbb{n}\backslash \mathbb{N}$ Park Tool Brush Set\n\nThe mud, dirt, and grime that builds up during off-road rides can damage key components. This kit does more than just keep your bike looking fresh; it keeps it healthy, too. parktool.com $\mathbb{S}80\backslash \mathbb{n}\backslash \mathbb{n}$ Rapha Arm and Leg Warmer\n\nThese merino layers, which have a bit of Lycra for stretch, peel off easily when the weather warms up. And they dry quickly, whether you sweat profusely or get caught in a sudden squall. rapha.cc From $\mathbb{S}70\backslash \mathbb{n}\backslash \mathbb{N}$ Topeak SmartGauge D2\n\nFor peak performance, adjust your tire pressure to suit the terrain. (On soft trails, lower pressure makes it grip better.) The SmartGauge makes it a snap with a fast, readable result. topeak.com $\$ 40$ + +![](images/dbb8a8c2c2bf6102daf300bc5ccdebd12c743a5f0b46758ef0732eed7771661c.jpg) +Input: + +![](images/cd6340a67f4aa70711d979225ba22f60251b0956c62633a3ebc28b4e3e8649cf.jpg) +Output: +Figure 5: Demo case of Percpetion-R1 on visual grounding task. + +# Question: + +Output the bounding box of the woman holding a child in the image. + +# Perception-R1: + +The woman holding a child(157,459), (208,622) + +![](images/e711809a55693e7d0cf74a6a3facccda9fe576ff5667b5ae1184515cb89ee65d.jpg) +Input: +Figure 6: Demo case of Percpception-R1 on general object detection task. The color of bounding boxes correspond to the category. + +![](images/10d999c72f599129e248ca707edf35cc4f10c71f663e2cb1c44587b8cc6aec3a.jpg) +Output: + +# Question: + +Please output bbox coordinates and names of {90 categories of COCO}. 
+ +# Perception-R1: + +```json +[ {"bbox_2d": [435, 1, 616, 415], "label": "person"}, {"bbox_2d": [218, 112, 323, 253], "label": "person"}, {"bbox_2d": [147, 165, 225, 280], "label": "person"}, {"bbox_2d": [183, 126, 239, 242], "label": "person"}, {"bbox_2d": [276, 77, 338, 192], "label": "person"}, {"bbox_2d": [127, 54, 471, 406], "label": "laptop"}, {"bbox_2d": [108, 174, 156, 243], "label": "cell phone"}, {"bbox_2d": [26, 107, 103, 214], "label": "potted plant"}, {"bbox_2d": [64, 29, 280, 137], "label": "chair"} ] +``` \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07954/images/01d85e92cf288989a366cb6be307e072ac35ef3cfdd208c1716ed73746c60ccc.jpg b/data/2025/2504_07xxx/2504.07954/images/01d85e92cf288989a366cb6be307e072ac35ef3cfdd208c1716ed73746c60ccc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7bf0ec081f7e010582a001408f67d4df45e078bf --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/01d85e92cf288989a366cb6be307e072ac35ef3cfdd208c1716ed73746c60ccc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08756b2e1a5db80709de9619b1acd6ca1db588d86127e4ca9b82388a7d859bca +size 1953 diff --git a/data/2025/2504_07xxx/2504.07954/images/0462c8ec286d5c7d395e7cbc94895fcdc812f7bea13f6f8a53be3af0b8a4702d.jpg b/data/2025/2504_07xxx/2504.07954/images/0462c8ec286d5c7d395e7cbc94895fcdc812f7bea13f6f8a53be3af0b8a4702d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bc3e95a75aa0c5195aafbd06e5db128009da9cb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/0462c8ec286d5c7d395e7cbc94895fcdc812f7bea13f6f8a53be3af0b8a4702d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72608988b6b10a3326da8cfe78c23e65d34b47370910c7edf67d3e97cc8dbe90 +size 54943 diff --git a/data/2025/2504_07xxx/2504.07954/images/07edbdbf2347c24f3eb016b1588bbda5eeb612ef769c5013c6c6fdee06542113.jpg 
b/data/2025/2504_07xxx/2504.07954/images/07edbdbf2347c24f3eb016b1588bbda5eeb612ef769c5013c6c6fdee06542113.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cdc98fafeb4503fa322aeb0f169a6e646a2bd7e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/07edbdbf2347c24f3eb016b1588bbda5eeb612ef769c5013c6c6fdee06542113.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c4c76248c765a7356b605bd7f0c75f89047572e82ab46c5c3ca62a4844b2b66 +size 11683 diff --git a/data/2025/2504_07xxx/2504.07954/images/0e15e41f95fe19e01d7b93c2eae7c047b8338378b3a8e6dcb098bdad66bfe059.jpg b/data/2025/2504_07xxx/2504.07954/images/0e15e41f95fe19e01d7b93c2eae7c047b8338378b3a8e6dcb098bdad66bfe059.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4dea90947eefdfa7b7678bf127910ea8a04dbae8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/0e15e41f95fe19e01d7b93c2eae7c047b8338378b3a8e6dcb098bdad66bfe059.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eec5bcbf4a6c394c6357e2c5f924efa3512f6f51c74615c2b1dd7d0cffb5ed8b +size 2471 diff --git a/data/2025/2504_07xxx/2504.07954/images/10d999c72f599129e248ca707edf35cc4f10c71f663e2cb1c44587b8cc6aec3a.jpg b/data/2025/2504_07xxx/2504.07954/images/10d999c72f599129e248ca707edf35cc4f10c71f663e2cb1c44587b8cc6aec3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb2d8b0e1ae47a3c851fd8299d5f65222441fffe --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/10d999c72f599129e248ca707edf35cc4f10c71f663e2cb1c44587b8cc6aec3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:444b6c7404875e3e821611dbaa417b41f8a7107c14c941d71caaf03de53e0612 +size 22629 diff --git a/data/2025/2504_07xxx/2504.07954/images/17ad0191a60a3dd9a54083a097320ffdc99ee524bda739999b0c69e290bafa00.jpg b/data/2025/2504_07xxx/2504.07954/images/17ad0191a60a3dd9a54083a097320ffdc99ee524bda739999b0c69e290bafa00.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..73482875268bfe994ff1352976fa3e8a4e391592 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/17ad0191a60a3dd9a54083a097320ffdc99ee524bda739999b0c69e290bafa00.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc33e8c99efb2513e2d346b54d365386b2f981c12726baa30aa2e8ca3525cc02 +size 11240 diff --git a/data/2025/2504_07xxx/2504.07954/images/17e909db05a21591ec24e42bc0b1d2eb629c10109a7cd0731914d981373ef4e4.jpg b/data/2025/2504_07xxx/2504.07954/images/17e909db05a21591ec24e42bc0b1d2eb629c10109a7cd0731914d981373ef4e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..947e0ff25056781bf6001f3b40b3e4e10dd405d4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/17e909db05a21591ec24e42bc0b1d2eb629c10109a7cd0731914d981373ef4e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be0353a8fbba2368bba9f74c063c0945299a893db8a656b9b9241e885fad0bd5 +size 46680 diff --git a/data/2025/2504_07xxx/2504.07954/images/228803a2141e32fd5ffd82f71c5eb34605ec16fae536c99ed74406455884046c.jpg b/data/2025/2504_07xxx/2504.07954/images/228803a2141e32fd5ffd82f71c5eb34605ec16fae536c99ed74406455884046c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a78112fb8cea46d8a9c5d9686038059c9dc47ea --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/228803a2141e32fd5ffd82f71c5eb34605ec16fae536c99ed74406455884046c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6706ab09b84cf528ea87ab09fc12086c7df6bdd9bc9785a10e96e4c8467c6b3f +size 2073 diff --git a/data/2025/2504_07xxx/2504.07954/images/22e55b7f6a2c932d951d78c5fb9270f67a7affd9a4fdf14064edc50383995f6b.jpg b/data/2025/2504_07xxx/2504.07954/images/22e55b7f6a2c932d951d78c5fb9270f67a7affd9a4fdf14064edc50383995f6b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4df35d0e667336394eafd22cfa1c544d22a1c874 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07954/images/22e55b7f6a2c932d951d78c5fb9270f67a7affd9a4fdf14064edc50383995f6b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0dafd965f5c044852c0d8e4c8b9ab839bee3336a6e938ce372a89445c16f85d4 +size 22669 diff --git a/data/2025/2504_07xxx/2504.07954/images/3053b8e6241b40acdbecf83ae363a7f83ebec9ed84048e20cc6e311d938803bd.jpg b/data/2025/2504_07xxx/2504.07954/images/3053b8e6241b40acdbecf83ae363a7f83ebec9ed84048e20cc6e311d938803bd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b24b6c93463e94a0b1de4068ce94f59d8e15709f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/3053b8e6241b40acdbecf83ae363a7f83ebec9ed84048e20cc6e311d938803bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:095a720da0ff9e461f1153d0017ef5dfae53306f8e77fa0e4c1a45ad56a58a1b +size 11789 diff --git a/data/2025/2504_07xxx/2504.07954/images/346f636339e1da4901dd34bcfb41c75b7043da12f645cc487144f19a5e0a4dec.jpg b/data/2025/2504_07xxx/2504.07954/images/346f636339e1da4901dd34bcfb41c75b7043da12f645cc487144f19a5e0a4dec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45b35a587b71260d17f7e6edc07ca3359d7cacb6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/346f636339e1da4901dd34bcfb41c75b7043da12f645cc487144f19a5e0a4dec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c65fb8c35ebeca2c40b4c2827522769ee479be7c5e7ca150c1f51cb0f19fb9e3 +size 10931 diff --git a/data/2025/2504_07xxx/2504.07954/images/44f539dac4b155cf2ac95483e3794e84df26f8133927bc788378cd03789aa417.jpg b/data/2025/2504_07xxx/2504.07954/images/44f539dac4b155cf2ac95483e3794e84df26f8133927bc788378cd03789aa417.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54805ffbeea2000e8dad7b59a28356519ca4cf9d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/44f539dac4b155cf2ac95483e3794e84df26f8133927bc788378cd03789aa417.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5086428b5c6806680baa641ab794bdf3cc287579041ad03f8f64b78ec2b53fe1 +size 5775 diff --git a/data/2025/2504_07xxx/2504.07954/images/4875cffcb0b4f6689680415c915db9a02d73f410732e3a17c1b934d8f4cc1304.jpg b/data/2025/2504_07xxx/2504.07954/images/4875cffcb0b4f6689680415c915db9a02d73f410732e3a17c1b934d8f4cc1304.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bddea2f5a6332237fe0ba62fdf400c79d24815ed --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/4875cffcb0b4f6689680415c915db9a02d73f410732e3a17c1b934d8f4cc1304.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9619c1e172bfe90123e3b8103ad682a36aa4afc8338dfacb899e005335ebeba2 +size 2796 diff --git a/data/2025/2504_07xxx/2504.07954/images/55b5bc36f1f90b8a6eb20d10517abeb0c8acaf1b7cf86d5aab7489ab807a0bba.jpg b/data/2025/2504_07xxx/2504.07954/images/55b5bc36f1f90b8a6eb20d10517abeb0c8acaf1b7cf86d5aab7489ab807a0bba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac0dfd62d2c787b2d65b71fba755c22856d94b8b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/55b5bc36f1f90b8a6eb20d10517abeb0c8acaf1b7cf86d5aab7489ab807a0bba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:032116f9d2b2df2345e3ef3bf54680e4f347ab4f2bd990215b311df5537f90ee +size 14061 diff --git a/data/2025/2504_07xxx/2504.07954/images/568578d2d46e7653152d99d920d5ccffedb75b5abfb947b17fdb84cd37721d67.jpg b/data/2025/2504_07xxx/2504.07954/images/568578d2d46e7653152d99d920d5ccffedb75b5abfb947b17fdb84cd37721d67.jpg new file mode 100644 index 0000000000000000000000000000000000000000..888170de57c3e6842b1b373eacc63a10e7f5a484 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/568578d2d46e7653152d99d920d5ccffedb75b5abfb947b17fdb84cd37721d67.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:330ca8dbfa9742167e9e034d9567d9f6e0434a3bd8f89b6b2d44e5ec6e49cfbe +size 48744 diff --git 
a/data/2025/2504_07xxx/2504.07954/images/579e0f2a185ee16ddbe4e2166f6260c9afac14b0b8d60d1d2a4c35e5ee16becc.jpg b/data/2025/2504_07xxx/2504.07954/images/579e0f2a185ee16ddbe4e2166f6260c9afac14b0b8d60d1d2a4c35e5ee16becc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5fe5873031f2125f03269db021563a222b8e163 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/579e0f2a185ee16ddbe4e2166f6260c9afac14b0b8d60d1d2a4c35e5ee16becc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e51948b21611e65005a5c8c898a513568a8b18d2a0a605e20d29cb4f646cd0ac +size 2261 diff --git a/data/2025/2504_07xxx/2504.07954/images/60e4f5a6e447288126a3a996a74edce8999b36c1645b0807a13af873d56e9b91.jpg b/data/2025/2504_07xxx/2504.07954/images/60e4f5a6e447288126a3a996a74edce8999b36c1645b0807a13af873d56e9b91.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cea02b97c0cb3cf94d84ad648920c6d699f9ef9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/60e4f5a6e447288126a3a996a74edce8999b36c1645b0807a13af873d56e9b91.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da8bfda0e3c430ec56184183803178808aef5de68f196f107bc09b96de6fb8cf +size 36569 diff --git a/data/2025/2504_07xxx/2504.07954/images/66e3dc80cff0f2751ef3431afc35a24985f5ee06c3e675f05e4e3abbf56e3f18.jpg b/data/2025/2504_07xxx/2504.07954/images/66e3dc80cff0f2751ef3431afc35a24985f5ee06c3e675f05e4e3abbf56e3f18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01526e895e11a78d1eaf52d4ef5623745145e281 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/66e3dc80cff0f2751ef3431afc35a24985f5ee06c3e675f05e4e3abbf56e3f18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2841b9d8b9180fa45fdc91d878a8c580c7d943bc64fd62417a7eae5b47b00d3b +size 1439 diff --git a/data/2025/2504_07xxx/2504.07954/images/67a5388ba51edf551e5fad1bcf4c017c24711e7b0aff42cdea97dc8a11150861.jpg 
b/data/2025/2504_07xxx/2504.07954/images/67a5388ba51edf551e5fad1bcf4c017c24711e7b0aff42cdea97dc8a11150861.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ad8aa0757802d48711b38a7dabf7153f0871096 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/67a5388ba51edf551e5fad1bcf4c017c24711e7b0aff42cdea97dc8a11150861.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bc4ad71cd00414233f0be0451fee26cd9ffa73a5debb692740ef5a01bd42fbc +size 22385 diff --git a/data/2025/2504_07xxx/2504.07954/images/6b1dde97d407fde40d64c3c974e5631c46e7f989a63f11f79ab2852f4c146256.jpg b/data/2025/2504_07xxx/2504.07954/images/6b1dde97d407fde40d64c3c974e5631c46e7f989a63f11f79ab2852f4c146256.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8e93aa4a628dca81f80e1a95ebe80ddb746ff2d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/6b1dde97d407fde40d64c3c974e5631c46e7f989a63f11f79ab2852f4c146256.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1e8be5a13e66ca862e409a9f6a6c077eb101949d7fe46f705aa45b9fe5cdc30 +size 47341 diff --git a/data/2025/2504_07xxx/2504.07954/images/788b19990e74a79322dd7e3e24bf51810e61970ac44292f29051f5632e9aeb1e.jpg b/data/2025/2504_07xxx/2504.07954/images/788b19990e74a79322dd7e3e24bf51810e61970ac44292f29051f5632e9aeb1e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69bd806fc76fab23dcb3bc1911489e5a6cb545a6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/788b19990e74a79322dd7e3e24bf51810e61970ac44292f29051f5632e9aeb1e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba0536bc73dc99c4bbd7bf643e8b98e7da5e54fc5017bfcaa600b5d5a0d986ed +size 4824 diff --git a/data/2025/2504_07xxx/2504.07954/images/7a149f44bd3a4f9a877011b9e1b5c1f1d3918685adfd74b02f83493a39d8af53.jpg b/data/2025/2504_07xxx/2504.07954/images/7a149f44bd3a4f9a877011b9e1b5c1f1d3918685adfd74b02f83493a39d8af53.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6dfbe52d1e8cb4877cf72b345719c98a3221fb29 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/7a149f44bd3a4f9a877011b9e1b5c1f1d3918685adfd74b02f83493a39d8af53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa7698ff7020345f18cfd31c6379299e2ba4777f73500276a123106c85f8691f +size 7600 diff --git a/data/2025/2504_07xxx/2504.07954/images/7c25e4962ffe267435cfb24669bd2212ff8b74cf4982030382e10e32dcb6c53a.jpg b/data/2025/2504_07xxx/2504.07954/images/7c25e4962ffe267435cfb24669bd2212ff8b74cf4982030382e10e32dcb6c53a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05a6a751ed699ba9d336caeeb87356dd4f306834 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/7c25e4962ffe267435cfb24669bd2212ff8b74cf4982030382e10e32dcb6c53a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0086a24002a8d246bae7ccd096ff0abfba6b3f5da87edfd94b74e5cefd7d17ae +size 1589 diff --git a/data/2025/2504_07xxx/2504.07954/images/89a957bd1bb76655d3273172dac178614e0fa382484fd578c9fe8b83a2e59d2c.jpg b/data/2025/2504_07xxx/2504.07954/images/89a957bd1bb76655d3273172dac178614e0fa382484fd578c9fe8b83a2e59d2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33f3ec549833c8d58eac81722178d8daae65466b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/89a957bd1bb76655d3273172dac178614e0fa382484fd578c9fe8b83a2e59d2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e7a6bb3559b8ddf3cb7d7f8552ab1401a9e90cdbcc94ea4dcebb317f834a806 +size 12262 diff --git a/data/2025/2504_07xxx/2504.07954/images/89c30d1304cdad26e2e739586527ddda8f446e389ea70502641287d4b639d78e.jpg b/data/2025/2504_07xxx/2504.07954/images/89c30d1304cdad26e2e739586527ddda8f446e389ea70502641287d4b639d78e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9adccefaf3c135583e3461ef249cdb4e5fb6a222 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07954/images/89c30d1304cdad26e2e739586527ddda8f446e389ea70502641287d4b639d78e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7785e7c9260705fe845a5a208645e9f37b11781df090ee512855b017dcd53d6f +size 13248 diff --git a/data/2025/2504_07xxx/2504.07954/images/98895dacdebc846941fa4240b973e0e77e41234c273900b597e8f824a43d6b97.jpg b/data/2025/2504_07xxx/2504.07954/images/98895dacdebc846941fa4240b973e0e77e41234c273900b597e8f824a43d6b97.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0759ea5b08a5994fb220f78a58a988e80caeb08 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/98895dacdebc846941fa4240b973e0e77e41234c273900b597e8f824a43d6b97.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:131104d02c9ccb07b7bebdd38e1445caa912ed75ffbcd333368ec3228f536c3d +size 32046 diff --git a/data/2025/2504_07xxx/2504.07954/images/9c08b3fd0a306124eb2afdc3e450ee13676fd58299babefc599a7cf81205271b.jpg b/data/2025/2504_07xxx/2504.07954/images/9c08b3fd0a306124eb2afdc3e450ee13676fd58299babefc599a7cf81205271b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5181f7530ba976430c49f1ed7327a4fcd28c83b4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/9c08b3fd0a306124eb2afdc3e450ee13676fd58299babefc599a7cf81205271b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:351eda1d2b7fc1561e0c41d5da37f66c8efde195e7bcad61c7547143a48a654b +size 2960 diff --git a/data/2025/2504_07xxx/2504.07954/images/9f7a0c8ae83a0967e411883ad44f42e7f3042fa2b28d0b197b66dc12d7173cd3.jpg b/data/2025/2504_07xxx/2504.07954/images/9f7a0c8ae83a0967e411883ad44f42e7f3042fa2b28d0b197b66dc12d7173cd3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4dbdd178716bea454cca8e32e4991e5f8dc44c68 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/9f7a0c8ae83a0967e411883ad44f42e7f3042fa2b28d0b197b66dc12d7173cd3.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0197a3ebdc6a2bc34bb51cdbbf031986191584567ea82553356691a963fc3cbb +size 6183 diff --git a/data/2025/2504_07xxx/2504.07954/images/aee8ba28fe17acb09471504630258562d6034bed74d6104b513bf8b4ff85240c.jpg b/data/2025/2504_07xxx/2504.07954/images/aee8ba28fe17acb09471504630258562d6034bed74d6104b513bf8b4ff85240c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb3966e0cc499e7fae00a2738c176fe2f13cad08 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/aee8ba28fe17acb09471504630258562d6034bed74d6104b513bf8b4ff85240c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b97ad88f44c1f1649fe8e03316d7639166ab50f49ef59648771d494387021a61 +size 10710 diff --git a/data/2025/2504_07xxx/2504.07954/images/b7035c312ed2b32280d4a48e901452b0da27d5e28e8481473c4d5374ca58c5bf.jpg b/data/2025/2504_07xxx/2504.07954/images/b7035c312ed2b32280d4a48e901452b0da27d5e28e8481473c4d5374ca58c5bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4e7f5f15dfa251b775b313a7477c187bb52b89f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/b7035c312ed2b32280d4a48e901452b0da27d5e28e8481473c4d5374ca58c5bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad50f10ccd7069a376414b1e14df807e7499ca1ae456d5fe2ec31b7e633d72f4 +size 58622 diff --git a/data/2025/2504_07xxx/2504.07954/images/cb9aa06cba656d51b4608fdf2dbc637b5aeafe0c9ddd151a6164daf19bf44f99.jpg b/data/2025/2504_07xxx/2504.07954/images/cb9aa06cba656d51b4608fdf2dbc637b5aeafe0c9ddd151a6164daf19bf44f99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4171769ebbf1ab309065f88ff1c9e2aa481b0b7e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/cb9aa06cba656d51b4608fdf2dbc637b5aeafe0c9ddd151a6164daf19bf44f99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff7cb0f0f12e02b7747d4fffdc29da459129cb197ee0049e5676584dc54f7057 +size 44564 diff --git 
a/data/2025/2504_07xxx/2504.07954/images/cd6340a67f4aa70711d979225ba22f60251b0956c62633a3ebc28b4e3e8649cf.jpg b/data/2025/2504_07xxx/2504.07954/images/cd6340a67f4aa70711d979225ba22f60251b0956c62633a3ebc28b4e3e8649cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce67be3b0f447c578306da661b4c2dade7fbea7a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/cd6340a67f4aa70711d979225ba22f60251b0956c62633a3ebc28b4e3e8649cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88b8ddf379c5820d93b2b49f66f22c824be397031cb2bfa4dfaae84c4bdd315d +size 33297 diff --git a/data/2025/2504_07xxx/2504.07954/images/cf3f379731aab4c9a3ee26872f83496ba2430ebac37aa5b35a001d16ebb1bfbc.jpg b/data/2025/2504_07xxx/2504.07954/images/cf3f379731aab4c9a3ee26872f83496ba2430ebac37aa5b35a001d16ebb1bfbc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef508efa2dcd73d91eb89f5c9c861b6ccf932fb5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/cf3f379731aab4c9a3ee26872f83496ba2430ebac37aa5b35a001d16ebb1bfbc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48686269056f89e705c70061950eb84844ceff89ac9f42ba97f0a61ce22e7c18 +size 1679 diff --git a/data/2025/2504_07xxx/2504.07954/images/dbb8a8c2c2bf6102daf300bc5ccdebd12c743a5f0b46758ef0732eed7771661c.jpg b/data/2025/2504_07xxx/2504.07954/images/dbb8a8c2c2bf6102daf300bc5ccdebd12c743a5f0b46758ef0732eed7771661c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80823c74a26a566191d2b27d2b0505b05eb29576 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/dbb8a8c2c2bf6102daf300bc5ccdebd12c743a5f0b46758ef0732eed7771661c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15c12fe0ea7fd952846d0de4a8125641509cc4d961a6f6fbbe375d3a47c978b0 +size 33387 diff --git a/data/2025/2504_07xxx/2504.07954/images/e44a87e5b4cbe43f61787ce03abac0772efea141fb0986021b5a52e9dcc58f20.jpg 
b/data/2025/2504_07xxx/2504.07954/images/e44a87e5b4cbe43f61787ce03abac0772efea141fb0986021b5a52e9dcc58f20.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce81c55570ee1e43f1cf7d338d6ae459ff89f069 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/e44a87e5b4cbe43f61787ce03abac0772efea141fb0986021b5a52e9dcc58f20.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e08f53e34a8e2871c1c6125614b0315ac9780c5cc3844a8657e0f2bb5f38d1d2 +size 175849 diff --git a/data/2025/2504_07xxx/2504.07954/images/e711809a55693e7d0cf74a6a3facccda9fe576ff5667b5ae1184515cb89ee65d.jpg b/data/2025/2504_07xxx/2504.07954/images/e711809a55693e7d0cf74a6a3facccda9fe576ff5667b5ae1184515cb89ee65d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89e9dab71f246793f0dd7c7ca296cb6f83a2284f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/e711809a55693e7d0cf74a6a3facccda9fe576ff5667b5ae1184515cb89ee65d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6afa355d8d81f1a1c4ffc90d21a4338ac102dec6ee213c17f53ea290588eb726 +size 19643 diff --git a/data/2025/2504_07xxx/2504.07954/images/f92468a49b9c1c8ca4893b7a61fdc713f5cfb6c948a614ddf22f1ec900d85de3.jpg b/data/2025/2504_07xxx/2504.07954/images/f92468a49b9c1c8ca4893b7a61fdc713f5cfb6c948a614ddf22f1ec900d85de3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ceea6ce7550a862691fb87f8c5452d245c65bce1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07954/images/f92468a49b9c1c8ca4893b7a61fdc713f5cfb6c948a614ddf22f1ec900d85de3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8d214ee0c423c9135fde10ebbfffba48ac6944bcbd7bc081559a0da8485f5b7 +size 12301 diff --git a/data/2025/2504_07xxx/2504.07954/layout.json b/data/2025/2504_07xxx/2504.07954/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3bd33e80e27bcdaf5155dc19c5f2bf749f5b4969 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07954/layout.json @@ -0,0 +1,10484 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 122, + 97, + 489, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 97, + 489, + 137 + ], + "spans": [ + { + "bbox": [ + 122, + 97, + 489, + 137 + ], + "type": "text", + "content": "Perception-R1: Pioneering Perception Policy with Reinforcement Learning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "spans": [ + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": "En Yu" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{1,\\mathbb{I}}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Kangheng Lin" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{2,\\mathbb{I}}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Liang Zhao" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{3,\\mathbb{I}}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Jisheng Yin" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Yana Wei" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Yuang Peng" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Haoran Wei" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": 
"inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Jianjian Sun" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Chunrui Han" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Zheng Ge" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Xiangyu Zhang" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Daxin Jiang" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Jingyu Wang" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": ", Wenbing Tao" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": "Huazhong University of Science and Technology \n" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": "Beijing University of Posts and Telecommunications \n" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + 
"type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": "StepFun \n" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": "Johns Hopkins University \n" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "inline_equation", + "content": "{}^{5}" + }, + { + "bbox": [ + 113, + 177, + 500, + 270 + ], + "type": "text", + "content": "Tingshua University \n{yuen, wenbingtao}@hust.edu.cn \nhttps://github.com/linkangheng/PR1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 281, + 298, + 329, + 310 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 298, + 329, + 310 + ], + "spans": [ + { + "bbox": [ + 281, + 298, + 329, + 310 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "spans": [ + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "text", + "content": "Inspired by the success of DeepSeek-R1, we explore the potential of rule-based reinforcement learning (RL) in MLLM post-training for perception policy learning. While promising, our initial experiments reveal that incorporating a thinking process through RL does not consistently lead to performance gains across all visual perception tasks. This leads us to delve into the essential role of RL in the context of visual perception. In this work, we return to the fundamentals and explore the effects of RL on different perception tasks. We observe that the perceptual perplexity is a major factor in determining the effectiveness of RL. We also observe that reward design plays a crucial role in further approaching the upper limit of model perception. 
To leverage these findings, we propose Perceptron-R1, a scalable RL framework using GRPO during MLLM post-training. With a standard Qwen2-VL-2B-Instruct, Perception-R1 achieves " + }, + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "inline_equation", + "content": "+4.2\\%" + }, + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "text", + "content": " on RefCOCO+, " + }, + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "inline_equation", + "content": "+17.9\\%" + }, + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "text", + "content": " on PixMo-Count, " + }, + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "inline_equation", + "content": "+4.2\\%" + }, + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "text", + "content": " on PageOCR, and notably, " + }, + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "inline_equation", + "content": "31.9\\%" + }, + { + "bbox": [ + 140, + 323, + 470, + 477 + ], + "type": "text", + "content": " AP on COCO2017 val1 for the first time, establishing a strong baseline for perception policy learning." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 498, + 190, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 498, + 190, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 190, + 510 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 113, + 525, + 420, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 525, + 420, + 536 + ], + "spans": [ + { + "bbox": [ + 113, + 525, + 420, + 536 + ], + "type": "text", + "content": "\"We do not see the world as it is, but as we are — or as we are conditioned to see it.\"" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 429, + 543, + 494, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 543, + 494, + 553 + ], + "spans": [ + { + "bbox": [ + 429, + 543, + 494, + 553 + ], + "type": "text", + "content": "Stephen R. Covey" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 566, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 566, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 566, + 504, + 654 + ], + "type": "text", + "content": "The landscape of large language model (LLM) has undergone a paradigm shift from non-reasoning foundation model, e.g., GPT-4/4o [44, 19], DeepSeek-V3 [33], to strongly reasoning model, e.g., OpenAI o1/o3 [45], DeepSeek-R1 [12], and Kimi-1.5 [57]. DeepSeek-R1, in particular, introduced a simple yet effective rule-based reinforcement learning (RL) approach [55], enabling emergent reasoning patterns without relying on traditional scaffolding techniques such as Monte Carlo Tree Search (MCTS) [17, 67] or Process Reward Models (PRM) [31]. This has catalyzed a new revolution in LLM post-training techniques, prompting researchers to develop more powerful reasoning language models [42, 24]." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "content": "Despite these advancements, current explorations predominantly focus on the purely linguistic domain, and the unimodal nature of these reasoning models limits their ability to engage with the world in a truly perceptive way. To bridge this gap, this work takes a pioneering step in exploring" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.07954v1 [cs.CV] 10 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 700, + 276, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 700, + 276, + 712 + ], + "spans": [ + { + "bbox": [ + 117, + 700, + 276, + 712 + ], + "type": "text", + "content": "†Corresponding author, † Core contribution" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 731, + 192, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 192, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 192, + 742 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "content": "the potential of perception policy learning within multimodal LLMs [61, 3] from lens of RL. 
While transferring RL techniques with reasoning processes, i.e., chain-of-thought [66], from the language domain shows promise on certain visual tasks, our empirical studies reveal that this approach is not universally effective. This inevitably prompts us to reexamine the role that RL play in visual perception tasks, and how the utilization of RL can lead to better and scalable perception policy." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 133, + 504, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 504, + 188 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 504, + 188 + ], + "type": "text", + "content": "The current understanding of RL as a post-training technique is primarily grounded in purely linguistic tasks [24] and language-centric multimodal tasks [10]. However, the characteristics of visual perception tasks are fundamentally distinct from those of natural language, necessitating a revised understanding of RL in the context of visual perception. Specifically, visual perception possesses two unique properties, as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 103, + 192, + 504, + 241 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 103, + 192, + 504, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 192, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 103, + 192, + 504, + 215 + ], + "type": "text", + "content": "- Visual perception is embodied in the objective physical world. It possesses definite physical truth values, e.g., points, lines, or bounding boxes, but it lacks semantics compared to language." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 103, + 219, + 504, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 219, + 504, + 241 + ], + "spans": [ + { + "bbox": [ + 103, + 219, + 504, + 241 + ], + "type": "text", + "content": "- Visual perception, e.g., visual grounding and counting, are mostly \"single-step\" direct predictions. It lacks structured reasoning search space for RL exploration." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 247, + 504, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 301 + ], + "type": "text", + "content": "These two characteristics determine that the application of RL to visual perception will have different properties from pure language [24] and language-centric multimodal [39, 41] approaches. In this work, we delve into the RL post-training of MLLM in the domain of visual perception, and further complements and extends the above understanding. Through extensive experimental analysis, we have uncovered several bitter yet valuable findings." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 103, + 306, + 504, + 380 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 103, + 306, + 504, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 306, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 103, + 306, + 504, + 329 + ], + "type": "text", + "content": "- Explicit thinking process (CoT) during RL is not necessary for current perception policy. (§ 5.2) We observe that the model without thinking process performs better than the one with thinking process." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 103, + 332, + 504, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 332, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 103, + 332, + 504, + 355 + ], + "type": "text", + "content": "- Reward design plays a pivotal role in perception policy learning. (§ 5.3) An appropriate reward function will lead to a healthier learning curve and explore stronger perceptual patterns of MLLM." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 103, + 357, + 504, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 357, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 103, + 357, + 504, + 380 + ], + "type": "text", + "content": "- Perceptual perplexity determines RL superiority over SFT. (§ 5.2) We observe that RL can bring more significant improvement compared to SFT on more complex visual tasks, e.g., object detection." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "text", + "content": "Driven by these findings, we present a simple, effective, and scalable RL framework, i.e., Perception-R1, for efficient perception policy learning. Inspired by mainstream language reasoning models [12, 57], Perception-R1 applies rule-based RL algorithm GRPO [55] during MLLM post-training stage. 
With a vanilla Qwen2-VL-2B-Instruct [61], Perception-R1 achieves significant improvement on multiple visual perception benchmarks, e.g., " + }, + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "inline_equation", + "content": "+4.2\\%" + }, + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "text", + "content": " on RefCOCO+ [40], " + }, + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "inline_equation", + "content": "+17.9\\%" + }, + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "text", + "content": " on PixMoCount [13], and " + }, + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "inline_equation", + "content": "+4.2\\%" + }, + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "text", + "content": " F1-score on PageOCR [34]. More importantly, Perception-R1 serves as the first time to enable a pure MLLM to reach " + }, + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "inline_equation", + "content": "31.9\\%" + }, + { + "bbox": [ + 104, + 391, + 506, + 501 + ], + "type": "text", + "content": " mAP on the object detection benchmark COCO2017 [32] va1, showcasing the great potential of general foundation models to surpass expert models in mainstream visual tasks. We hope our method, results, and analysis will inspire future research on perception policy learning with RL." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 516, + 202, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 516, + 202, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 202, + 529 + ], + "type": "text", + "content": "2 Related Works" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": "Multimodal Foundation and Reasoning Models. 
Recently, vision-language models [37, 3, 73, 70] have demonstrated remarkable capabilities in visual comprehension [64, 68] and generation [14, 48] through large-scale pretraining [2, 61] and visual instruction tuning [37, 35]. These models integrate visual modalities into a unified semantic space via visual encoders [49] and adapters [11, 37], while leveraging auto-regressive large language models [59, 1] as decoders for output generation. Despite the advancements in multimodal foundation models, their visual reasoning capabilities remain in an early developmental stage. Recent approaches [8, 39, 41] have explored reinforcement learning (RL) post-training to enhance visual reasoning. However, they primarily focus on language-centric tasks such as ambiguous reference resolution [39] and geometric problem-solving [41], while overlooking critical aspects of perception-driven reasoning. In this work, we take a pioneering step in utilizing RL for perception policy learning, aiming to bridge this gap and advance multimodal reasoning." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 666, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 506, + 723 + ], + "type": "text", + "content": "Visual Perception in Multimodal Models. Visual Perception, as a concept in the field of computer vision [21, 52, 20, 69, 29], refers to the process of interpreting and understanding sensory, i.e., vision, information from the real-word. In the context of multimodal LLMs (MLLM), visual perception plays a crucial role in enabling the models to integrate, comprehend and reason visual information from the image or video. 
Existing MLLM generally enhance their visual perception capabilities by" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "designing more advanced visual perception architectures [63, 64], more suitable visual-language modeling strategies [70, 68], and more sophisticated post-training techniques [74]. This work aims to explore the potential of further enhancing visual perception from the perspective of RL." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "text", + "content": "RL-based Post-training in LLMs and MLLMs. Reinforcement learning (RL) has emerged as a pivotal paradigm for refining LLMs through alignment with human preferences and task-specific objectives. Prominent approaches like Reinforcement Learning from Human Feedback (RLHF) [46] and Direct Preference Optimization (DPO) [50] have demonstrated remarkable success in enhancing safety, coherence, and instruction-following capabilities of LLMs [43, 47, 44] and MLLMs [74, 60]. Recently, rule-based RL techniques, represented by GRPO [55], have demonstrated the potential for large-scale RL applications. LLMs have officially entered the era of strongly reasoning models. 
Subsequently, MLLMs [8, 39, 41] have also quickly followed this technology. However, so far, there has been no exciting, true \"Aha Moment\" in the multimodal domain. This study aims to investigate the potential contributions of RL to multimodal models, focusing on visual perception." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 237, + 195, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 237, + 195, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 237, + 195, + 249 + ], + "type": "text", + "content": "3 Preliminaries" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 262, + 504, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 330 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 330 + ], + "type": "text", + "content": "Perception Policy Definition. The goal of perception policy in visual-language context is enabling the model to first " + }, + { + "bbox": [ + 104, + 262, + 504, + 330 + ], + "type": "inline_equation", + "content": "(i)" + }, + { + "bbox": [ + 104, + 262, + 504, + 330 + ], + "type": "text", + "content": " extract and understand visual information from the environment [37, 68], then " + }, + { + "bbox": [ + 104, + 262, + 504, + 330 + ], + "type": "inline_equation", + "content": "(ii)" + }, + { + "bbox": [ + 104, + 262, + 504, + 330 + ], + "type": "text", + "content": " perform logical reasoning based on this understanding [73, 70] to " + }, + { + "bbox": [ + 104, + 262, + 504, + 330 + ], + "type": "inline_equation", + "content": "(iii)" + }, + { + "bbox": [ + 104, + 262, + 504, + 330 + ], + "type": "text", + "content": " accomplish specific tasks and further interact with the environment [5, 22]. In this work, we aim to empower the model to deal with a series of pure visual, e.g., counting, detection, and visual-language, e.g., grounding, optical character recognition (OCR), tasks through perception policy learning." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 334, + 505, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 334, + 505, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 505, + 400 + ], + "type": "text", + "content": "Group Relative Policy Optimization (GRPO [55]) is a rule-based reinforcement learning algorithm tailored for post-training LLMs. Its core idea is to use group relative rewards to optimize the policy, eliminating the need for a separate critic model [54]. Specifically, GRPO samples multiple outputs " + }, + { + "bbox": [ + 104, + 334, + 505, + 400 + ], + "type": "inline_equation", + "content": "(\\mathbf{o}_1 \\sim \\mathbf{o}_{\\mathbf{g}}" + }, + { + "bbox": [ + 104, + 334, + 505, + 400 + ], + "type": "text", + "content": " in Figure 1) from the old policy for the same input, calculates the average reward of these outputs as the baseline, and uses the relative rewards to guide policy updates. The optimization objective of GRPO can be formulated as following:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 424, + 276, + 438 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 424, + 276, + 438 + ], + "spans": [ + { + "bbox": [ + 115, + 424, + 276, + 438 + ], + "type": "interline_equation", + "content": "\\mathcal {J} _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {[ q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\text {o l d}}} (O | q) ]}", + "image_path": "788b19990e74a79322dd7e3e24bf51810e61970ac44292f29051f5632e9aeb1e.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 153, + 441, + 496, + 475 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 441, + 496, + 475 + ], + "spans": [ + { + "bbox": [ + 153, + 441, + 496, + 475 + ], + "type": "interline_equation", + "content": "\\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\sum_ {t = 1} ^ {| o _ {i} |} \\left\\{\\min \\left[ 
\\frac {\\pi_ {\\theta} ^ {i , t}}{\\pi_ {\\theta_ {\\mathrm {o l d}}} ^ {i , t}} \\hat {A} _ {i, t}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} ^ {i , t}}{\\pi_ {\\theta_ {\\mathrm {o l d}}} ^ {i , t}}, 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i, t} \\right] - \\beta \\mathbb {D} _ {\\mathrm {K L}} [ \\pi_ {\\theta} \\| \\pi_ {\\mathrm {r e f}} ] \\right\\},", + "image_path": "55b5bc36f1f90b8a6eb20d10517abeb0c8acaf1b7cf86d5aab7489ab807a0bba.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 176, + 491, + 504, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 491, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 176, + 491, + 504, + 517 + ], + "type": "interline_equation", + "content": "\\mathbb {D} _ {\\mathrm {K L}} \\left[ \\pi_ {\\theta} \\| \\pi_ {\\text {r e f}} \\right] = \\frac {\\pi_ {\\text {r e f}} \\left(o _ {i , t} | q , o _ {i , < t}\\right)}{\\pi_ {\\theta} \\left(o _ {i , t} | q , o _ {i , < t}\\right)} - \\log \\frac {\\pi_ {\\text {r e f}} \\left(o _ {i , t} | q , o _ {i , < t}\\right)}{\\pi_ {\\theta} \\left(o _ {i , t} | q , o _ {i , < t}\\right)} - 1, \\tag {1}", + "image_path": "07edbdbf2347c24f3eb016b1588bbda5eeb612ef769c5013c6c6fdee06542113.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "type": "text", + "content": " are hyper-parameters, and " + }, + { + "bbox": [ + 105, + 528, + 506, 
+ 552 + ], + "type": "inline_equation", + "content": "\\hat{A}_{i,t}" + }, + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "type": "text", + "content": " is the advantage, computed using a group of rewards " + }, + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "type": "inline_equation", + "content": "\\{r_1,r_2,\\dots ,r_G\\}" + }, + { + "bbox": [ + 105, + 528, + 506, + 552 + ], + "type": "text", + "content": " corresponding to the outputs within each group. Refer to [12, 55] for more details." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 567, + 198, + 581 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 198, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 198, + 581 + ], + "type": "text", + "content": "4 Perception-R1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 593, + 504, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 593, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 593, + 504, + 628 + ], + "type": "text", + "content": "In a nutshell, our Perception-R1 applies the rule-based RL algorithm GRPO [55] to the post-training stage of MLLM and optimizes the reward modeling to support perception policy learning. Figure 1 illustrates the idea, more approach and implementation details introduced next." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 640, + 258, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 640, + 258, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 640, + 258, + 653 + ], + "type": "text", + "content": "4.1 Rule-based Reward Modeling" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": "The reward function serves as the principal training signal in reinforcement learning (RL), directing the optimization process. Existing LLM methods [12, 57, 24] basically apply a highly resilient, rule-based reward system consisting of only two reward types: Format Reward and Answer Reward." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "Format Reward. In existing LLM and MLLM, the output format is comprised of two essential components: the final output format and the intermediate reasoning process format. 
The reward for" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 119, + 68, + 492, + 208 + ], + "blocks": [ + { + "bbox": [ + 119, + 68, + 492, + 208 + ], + "lines": [ + { + "bbox": [ + 119, + 68, + 492, + 208 + ], + "spans": [ + { + "bbox": [ + 119, + 68, + 492, + 208 + ], + "type": "image", + "image_path": "cb9aa06cba656d51b4608fdf2dbc637b5aeafe0c9ddd151a6164daf19bf44f99.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 214, + 506, + 239 + ], + "lines": [ + { + "bbox": [ + 104, + 214, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 214, + 506, + 239 + ], + "type": "text", + "content": "Figure 1: Illustration of Perception-R1 framework. Following DeepSeek-R1 [12], we prompt MLLM model to generate several rollout responses and apply GRPO [55] during post-training stage." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 258, + 506, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 293 + ], + "type": "text", + "content": "the final output is defined in accordance with specific task requirements and is typically encapsulated within `` tags, whereas the reward for the intermediate reasoning process generally mandates that the reasoning steps be enclosed within `` tags. 
Formally," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 225, + 312, + 505, + 339 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 312, + 505, + 339 + ], + "spans": [ + { + "bbox": [ + 225, + 312, + 505, + 339 + ], + "type": "interline_equation", + "content": "S _ {\\text {f o r m a t}} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f f o r m a t i s c o r r e c t} \\\\ - 1, & \\text {i f f o r m a t i s i n c o r r e c t} \\end{array} \\right. \\tag {2}", + "image_path": "7a149f44bd3a4f9a877011b9e1b5c1f1d3918685adfd74b02f83493a39d8af53.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "text", + "content": "In Perception-R1, we follow this setting. A subtle difference emerges that visual perception task frequently requires the output of object coordinates, e.g., bounding box, lines, or points. Consequently, the output format must be strictly constrained to the " + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "inline_equation", + "content": "[x1, y1, x2, y2]" + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "text", + "content": " structure." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 388, + 505, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 505, + 444 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 505, + 444 + ], + "type": "text", + "content": "Answer Reward. The Answer Reward pertains to the correctness of model-generated responses, serving as a central consideration in reward design. Typically, outputs from language models are abstract and semantically rich, requiring validation through external mechanisms such as code-based ADE [12] or mathematical answer verification [55]. 
In contrast, visual perception tasks benefit from clearly defined physical ground truths, which simplify the development of a robust reward function." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 448, + 507, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 507, + 493 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 507, + 493 + ], + "type": "text", + "content": "Perception-R1 diverges from LLM approaches by anchoring the reward mechanism in visual discrimination. This departure is pivotal, as it replaces the often implicit and subjective feedback mechanisms typical of language models with an explicit, quantifiable metric. Formally, discriminative reward " + }, + { + "bbox": [ + 104, + 448, + 507, + 493 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 104, + 448, + 507, + 493 + ], + "type": "text", + "content": " can be represented as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 274, + 516, + 505, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 274, + 516, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 274, + 516, + 505, + 529 + ], + "type": "interline_equation", + "content": "r _ {i} = \\Phi \\left(o _ {i}, z\\right), \\tag {3}", + "image_path": "4875cffcb0b4f6689680415c915db9a02d73f410732e3a17c1b934d8f4cc1304.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 540, + 505, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 505, + 574 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 540, + 505, + 574 + ], + "type": "inline_equation", + "content": "\\Phi(\\cdot)" + }, + { + "bbox": [ + 104, + 540, + 505, + 574 + ], + "type": "text", + "content": " indicates the discriminative function, for example, IoU for bounding box and euclidean distance for point. 
By leveraging visual discrimination, we provide the model with a clear and objective feedback signal, ensuring the model's policy update with precise measured margin." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 586, + 270, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 270, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 270, + 600 + ], + "type": "text", + "content": "4.2 Multi-Subject Reward Matching" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 607, + 506, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 607, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 506, + 674 + ], + "type": "text", + "content": "In natural environments, physical objects rarely appear in isolation and instead frequently co-occur in groups. This inherent complexity gives rise to a challenge we define as reward matching, which entails aligning the model's output with the corresponding ground truth before reward computation. Specifically, when prompting the model to predict the attributes of multiple subjects within an image, e.g., points and bounding box, it is necessary to determine the appropriate ground truth reference for each subject to ensure accurate reward assignment." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": "Formally, let " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "y = \\{y_{i}\\}_{i=1}^{N}" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " denote the set of predicted attributes for " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " subjects, and let " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "z = \\{z_{j}\\}_{j=1}^{M}" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " represent the corresponding ground truth attributes. We model the reward matching problem as a bipartite graph matching task, where one set of nodes corresponds to predictions and the other to ground truths. 
The edge weight between a prediction " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " and a ground truth " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "t_{j}" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " is determined by the" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "reward function " + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "inline_equation", + "content": "\\Phi(y_i, z_j)" + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": " defined in Eq. 3, which measures their similarity or compatibility. 
The objective is to find the optimal assignment that maximizes the total reward:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 240, + 113, + 505, + 146 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 113, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 240, + 113, + 505, + 146 + ], + "type": "interline_equation", + "content": "\\hat {\\sigma} = \\underset {\\sigma \\in \\Omega_ {N}} {\\arg \\max } \\sum_ {i = 1} ^ {N} \\Phi (y _ {i}, z _ {\\sigma (i)}), \\tag {4}", + "image_path": "9f7a0c8ae83a0967e411883ad44f42e7f3042fa2b28d0b197b66dc12d7173cd3.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 152, + 504, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 504, + 211 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 504, + 211 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 152, + 504, + 211 + ], + "type": "inline_equation", + "content": "\\Omega_N" + }, + { + "bbox": [ + 104, + 152, + 504, + 211 + ], + "type": "text", + "content": " is the set of all valid assignments between predictions and ground truths. To solve this optimization problem efficiently, we employ the Hungarian algorithm [27], a well-established method for bipartite graph matching that guarantees the optimal pairing by maximizing the overall reward (or equivalently, minimizing the cost). This ensures that each predicted attribute is accurately matched with its corresponding ground truth, thereby optimizing the reward computation process." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 213, + 504, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 236 + ], + "type": "text", + "content": "After the optimal reward assignment is determined, we calculate the answer reward by aggregating the individual rewards for each subject. 
Mathematically, the overall reward score is defined as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 241, + 253, + 505, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 253, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 241, + 253, + 505, + 285 + ], + "type": "interline_equation", + "content": "S _ {\\text {a n s w e r}} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\Phi (y _ {i}, z _ {\\hat {\\sigma} (i)}), \\tag {5}", + "image_path": "44f539dac4b155cf2ac95483e3794e84df26f8133927bc788378cd03789aa417.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 242, + 287, + 341, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 287, + 341, + 300 + ], + "spans": [ + { + "bbox": [ + 242, + 287, + 341, + 300 + ], + "type": "interline_equation", + "content": "S _ {\\text {t o t a l}} = S _ {\\text {f o r m a t}} + S _ {\\text {a n s w e r}}", + "image_path": "9c08b3fd0a306124eb2afdc3e450ee13676fd58299babefc599a7cf81205271b.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 308, + 504, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 343 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 343 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 308, + 504, + 343 + ], + "type": "inline_equation", + "content": "\\hat{\\sigma}" + }, + { + "bbox": [ + 104, + 308, + 504, + 343 + ], + "type": "text", + "content": " is the optimal assignment obtained via the Hungarian algorithm. In Perception-R1, we primarily use reward matching for visual counting and object detection tasks, as these involve multiple objects." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 354, + 254, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 254, + 366 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 254, + 366 + ], + "type": "text", + "content": "4.3 Perception-R1 Configuration" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 374, + 505, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 505, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 505, + 419 + ], + "type": "text", + "content": "Model Setting. Our model implementation follows Qwen2-VL [61]. We mainly use the Qwen2-VL-Instruct-2B as the baseline model. We also utilize Qwen2.5-VL-3B-Instruct [3] for training object detection tasks, due to its specialized optimization for localizing bounding boxes. The input image resolution for Qwen2-VL is dynamic cooperated with 2D-RoPE [56]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 423, + 506, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 423, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 423, + 506, + 491 + ], + "type": "text", + "content": "Task and Data Setting. Given that Perception-R1 is primarily oriented towards pure visual and visual-language tasks, we select several mainstream and representative downstream tasks for perception policy learning, specifically including visual grounding, e.g., refCOCO [71] / + [71] / g [40], OCR, i.e., PageOCR [34], visual counting, i.e., Pixmo-Count [13], and object detection, i.e., COCO2017 [32]. For each task, a subset " + }, + { + "bbox": [ + 104, + 423, + 506, + 491 + ], + "type": "inline_equation", + "content": "(5k\\sim 10k)" + }, + { + "bbox": [ + 104, + 423, + 506, + 491 + ], + "type": "text", + "content": " of samples are respectively extracted as base data for individual post-training. More details are in appendix A.1." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 494, + 504, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 494, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 494, + 504, + 539 + ], + "type": "text", + "content": "Training Setting. We focus on the RL-based post-training stage of MLLM. All the selected base models have already undergone pre-training and SFT stage. During RL stage, the initial learning rate is set as " + }, + { + "bbox": [ + 104, + 494, + 504, + 539 + ], + "type": "inline_equation", + "content": "1e - 6" + }, + { + "bbox": [ + 104, + 494, + 504, + 539 + ], + "type": "text", + "content": " with 8 rollouts by default and a batch size of 1. The following are some important hyper-parameters during post-training. Prompts detailed settings are in the appendix A.1." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 111, + 544, + 499, + 567 + ], + "blocks": [ + { + "bbox": [ + 111, + 544, + 499, + 567 + ], + "lines": [ + { + "bbox": [ + 111, + 544, + 499, + 567 + ], + "spans": [ + { + "bbox": [ + 111, + 544, + 499, + 567 + ], + "type": "table", + "html": "
Gradient AccumulationRollout GKL CoefficientMax Response LenTemperature
280.0420481.0
", + "image_path": "89c30d1304cdad26e2e739586527ddda8f446e389ea70502641287d4b639d78e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 574, + 506, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 506, + 652 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 506, + 652 + ], + "type": "text", + "content": "Reward Setting. We tailor distinct discriminative rewards for various visual perception tasks. For the grounding task, the reward is based on the Intersection over Union (IoU) between the predicted output and the ground truth. In the counting task, we adopt a paradigm similar to Qwen2.5-VL, which first detects points and then counts them. Here, the reward is derived from the Euclidean distance computed during reward matching. For OCR, the edit distance serves as the primary reward metric. Lastly, in object detection, we combine multiple rewards: an object number reward based on the F1 score, a location reward using IoU, and a binary classification reward with a missing penalty." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 655, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 506, + 723 + ], + "type": "text", + "content": "Sampling Setting. Following Kimi-1.5 [57], we adopt a curriculum sampling strategy that begins with easier data and gradually transitions to more challenging examples. Specifically, for the object detection task, we first conduct offline training on the COCO dataset to compute reward values. Based on the selected rewards, i.e., number reward, we partition the dataset accordingly. As training advances, we progressively replace the data with more difficult samples (i.e., those associated with lower rewards) while concurrently increasing the rollout to broaden the model's exploration space." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 109, + 69, + 499, + 384 + ], + "blocks": [ + { + "bbox": [ + 109, + 69, + 499, + 384 + ], + "lines": [ + { + "bbox": [ + 109, + 69, + 499, + 384 + ], + "spans": [ + { + "bbox": [ + 109, + 69, + 499, + 384 + ], + "type": "table", + "html": "
methodsizeRefCOCO
val@50testA@50testB@50val@75testA@75testB@75val@95testA@95testB@95valAvgtestAAvg
MDETR [25]-87.590.482.6--------
OFA [62]-88.490.683.3--------
LLaVA-1.5 [35]7B49.154.943.310.713.66.90.40.30.320.122.9
LLaVA-NeXT [36]7B82.588.474.045.754.835.61.92.60.743.448.6
LLaVA-OV [28]7B73.082.363.524.229.615.90.50.50.532.637.5
Qwen2-VL [61]2B86.889.682.077.280.670.133.035.726.965.768.6
Perception-R12B89.191.484.579.583.672.435.038.528.867.971.2
RefCOCO+
methodsizeval@50testA@50testB@50val@75testA@75testB@75val@95testA@95testB@95valAvgtestAAvg
MDETR [25]-81.185.572.9--------
OFA [62]-81.387.174.2--------
LLaVA-1.5 [35]7B42.449.736.49.812.46.40.50.50.217.620.8
LLaVA-NeXT [36]7B74.584.064.741.551.830.01.92.71.039.346.2
LLaVA-OV [28]7B65.879.057.223.628.815.30.60.60.430.036.1
Qwen2-VL [61]2B77.182.570.168.773.860.029.432.323.058.462.9
Perception-R12B81.786.874.373.679.364.232.636.926.762.667.7
RefCOCOg
methodsizeval@50test@50val@75test@75val@95test@95valAvgtestAvg
MDETR [25]-83.383.3------
OFA [62]-82.282.3------
LLaVA-1.5 [35]7B43.245.18.59.30.30.317.318.2
LLaVA-NeXT [36]7B77.577.140.739.91.81.740.039.6
LLaVA-OV [28]7B70.870.823.323.60.60.731.631.7
Qwen2-VL [61]2B83.383.172.773.028.927.961.661.3
Perception-R12B85.785.475.776.032.133.164.564.8
", + "image_path": "e44a87e5b4cbe43f61787ce03abac0772efea141fb0986021b5a52e9dcc58f20.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 106, + 426, + 502, + 515 + ], + "blocks": [ + { + "bbox": [ + 105, + 386, + 504, + 419 + ], + "lines": [ + { + "bbox": [ + 105, + 386, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 504, + 419 + ], + "type": "text", + "content": "Table 1: Visual grounding benchmark evaluation. To comprehensively assess the model's grounding capability, we select referring expression comprehension (REC) benchmark, i.e., RefCOCO [71], RefCOCO+[71], and RefCOCOg[40] for evaluation. The expert model is denoted in gray." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 426, + 502, + 515 + ], + "lines": [ + { + "bbox": [ + 106, + 426, + 502, + 515 + ], + "spans": [ + { + "bbox": [ + 106, + 426, + 502, + 515 + ], + "type": "table", + "html": "
sizeEdit Distance ↓F1-score ↑Precision ↑Recall ↑BLEU ↑METEOR ↑
enzhenzhenzhenzhenzhenzh
Nougat [4]250M25.5-74.5-72.0-80.9-66.5-76.1-
DocOwl1.5 [23]7B25.8-86.2-83.5-96.2-78.8-85.8-
GOT [65]580M3.53.897.298.097.198.297.397.894.787.895.893.9
Qwen2-VL [61]2B8.010.094.493.096.996.193.090.590.978.094.187.2
LLaVA-NeXT [36]7B43.0-64.7-57.3-88.1-47.8-58.2-
Perception-R12B3.59.098.294.498.696.397.892.796.774.698.188.9
", + "image_path": "568578d2d46e7653152d99d920d5ccffedb75b5abfb947b17fdb84cd37721d67.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 519, + 504, + 541 + ], + "lines": [ + { + "bbox": [ + 104, + 519, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 504, + 541 + ], + "type": "text", + "content": "Table 2: PageOCR evaluation, compared with various strong expert and general models. \"en\" means English and \"zh\" means Chinese." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 548, + 192, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 192, + 562 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 192, + 562 + ], + "type": "text", + "content": "5 Experiments" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 573, + 506, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 573, + 506, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 573, + 506, + 618 + ], + "type": "text", + "content": "The experimental section evaluates Perception-R1's performance on visual perception tasks (§ 5.1), followed by analytical experiments exploring reinforcement learning (RL)'s role in perception policy learning (§ 5.2). Finally, it discusses the interplay between visual perception and RL, along with key insights for perception policy learning (§ 5.3)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 631, + 320, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 631, + 320, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 320, + 643 + ], + "type": "text", + "content": "5.1 Performance Landscape in Perception Tasks" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 651, + 505, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 651, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 651, + 505, + 685 + ], + "type": "text", + "content": "We evaluate Perception-R1 on mainstream perception tasks: visual grounding, counting, OCR, and object detection. Experiments use the datasets described in § 4.3 and benchmarks for image understanding. Results are in Tables 1-4. See Appendix A.2 for details." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "content": "Visual Grounding is a task that involves localizing visual objects based on linguistic descriptions. Specifically, given a language prompt, the model is required to output the spatial coordinates of the subject (typically a single entity) described in the prompt. 
As shown in Table 1, we evaluate" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 109, + 71, + 298, + 148 + ], + "blocks": [ + { + "bbox": [ + 109, + 71, + 298, + 148 + ], + "lines": [ + { + "bbox": [ + 109, + 71, + 298, + 148 + ], + "spans": [ + { + "bbox": [ + 109, + 71, + 298, + 148 + ], + "type": "table", + "html": "
methodsizeVisual Counting
PixmovalPixmotoetest
LLaVA-1.5 [35]7B33.331.0
LLaVA-1.6 [58]7B32.731.9
LLaVA-OV [28]7B55.853.7
Qwen2-VL [61]2B60.250.5
Perception-R12B78.175.6
", + "image_path": "22e55b7f6a2c932d951d78c5fb9270f67a7affd9a4fdf14064edc50383995f6b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 105, + 150, + 298, + 169 + ], + "lines": [ + { + "bbox": [ + 105, + 150, + 298, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 298, + 169 + ], + "type": "text", + "content": "(a) Visual counting evaluation on Pixmo-Count [13] \nval set and test set." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 309, + 71, + 503, + 148 + ], + "blocks": [ + { + "bbox": [ + 309, + 71, + 503, + 148 + ], + "lines": [ + { + "bbox": [ + 309, + 71, + 503, + 148 + ], + "spans": [ + { + "bbox": [ + 309, + 71, + 503, + 148 + ], + "type": "table", + "html": "
methodsizeepochObject Detection
AP\\( AP_{50} \\)\\( AP_{75} \\)
YOLOv3 [51]-27327.949.228.3
Faster-RCNN [52]-1235.655.737.9
DETR [6]41M50042.062.444.2
Qwen2.5-VL [3]3B116.123.716.7
Perception-R1†3B131.946.733.4
", + "image_path": "67a5388ba51edf551e5fad1bcf4c017c24711e7b0aff42cdea97dc8a11150861.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 309, + 149, + 501, + 169 + ], + "lines": [ + { + "bbox": [ + 309, + 149, + 501, + 169 + ], + "spans": [ + { + "bbox": [ + 309, + 149, + 501, + 169 + ], + "type": "text", + "content": "(b) Object detection evaluation on COCO2017 [32] validation set." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 214, + 501, + 283 + ], + "blocks": [ + { + "bbox": [ + 104, + 173, + 506, + 207 + ], + "lines": [ + { + "bbox": [ + 104, + 173, + 506, + 207 + ], + "spans": [ + { + "bbox": [ + 104, + 173, + 506, + 207 + ], + "type": "text", + "content": "Table 3: Mainstream visual tasks evaluation including (a) visual object counting and (b) challenging general object detection. Notably, the results of expert model in (b) are copied from MMDetection [7]. " + }, + { + "bbox": [ + 104, + 173, + 506, + 207 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 104, + 173, + 506, + 207 + ], + "type": "text", + "content": " means Perception-R1 for object detection is build based on Qwen2.5-VL-3B-Instruct [3]." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 214, + 501, + 283 + ], + "lines": [ + { + "bbox": [ + 107, + 214, + 501, + 283 + ], + "spans": [ + { + "bbox": [ + 107, + 214, + 501, + 283 + ], + "type": "table", + "html": "
llmMMBenchMMVetMMStar ScienceQASeedBenchMMELLaVA-BenchAI2D
AvgAvgAvgAvgAvgAvgAvgCognitionPerceptionAvgAvg
LLaVA1.5 [35]Vicuna1.5-7B62.832.832.665.460.1302.11338.352.651.9
LLaVA-NeXT [36]Vicuna1.5-7B66.037.937.768.269.1195.71419.552.767.4
Qwen2-VL [61]Qwen2-2B71.945.646.374.072.7418.51471.146.571.6
Perception-R1Qwen2-2B71.848.945.773.473.0430.01473.958.271.8
", + "image_path": "6b1dde97d407fde40d64c3c974e5631c46e7f989a63f11f79ab2852f4c146256.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 287, + 506, + 331 + ], + "lines": [ + { + "bbox": [ + 104, + 287, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 287, + 506, + 331 + ], + "type": "text", + "content": "Table 4: General image understanding and reasoning evaluation, compared with various baselines. We select 8 mainstream multimodal benchmarks, i.e., MMBench [38], MMVet [72], MMStar [9], ScienceQA [53], SeedBench [18], MME [16], LLaVA-Bench [37], and ai2D [26] for the comprehensive understanding. We use the model after RL training in the counting tasks for the eval." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 345, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 345, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 504, + 422 + ], + "type": "text", + "content": "Perception-R1 on three mainstream benchmarks, refCOCO / + / g, and report Acc@0.5, Acc@0.75, and Acc@0.95 to comprehensively assess its visual grounding capability. We surprisingly find that several SoTA MLLMs exhibit poor performance on the more challenging Acc@0.95 metric, with scores even below 1%. In contrast, Perception-R1 achieves a stable performance of over 30% on this metric. This observation suggests that the community should prioritize reporting more discriminative results in future evaluations. The experimental results demonstrate that Perception-R1 exhibits strong competitiveness compared to both specialized and general-purpose models." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 426, + 506, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 506, + 537 + ], + "type": "text", + "content": "Optical Character Recognition (OCR) represents a critical task in visual perception due to its substantial practical value. Current methodologies predominantly adopt either expert models or fine-tuned generalist models for OCR. Perception-R1 pioneers the utilization of RL to further unlock the OCR capabilities of MLLM. As shown in Table 2, our proposed Perception-R1 achieves SoTA performance on the highly challenging OCR benchmark, i.e., PageOCR [34], demonstrating significant superiority over existing expert models, e.g., GOT (98.1 vs. 97.2 F1-score) and robust generalist models, e.g., LLaVA-NeXT (98.1 vs. 64.7 F1-score). Notably, Perception-R1 does not use the Chinese OCR data for training so it is a zero-shot performance for Chinese metric. This breakthrough substantiates the formidable potential of RL applications in OCR tasks, establishing new frontiers for enhancing textual understanding and recognition in complex visual environments." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 541, + 506, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 506, + 641 + ], + "type": "text", + "content": "Visual Counting, as a fundamental vision task, necessitates models to accurately quantify category-specific instances within images, requiring robust visual logic to identify and enumerate targets through structured recognition patterns. In Perception-R1, we adopt a detect-then-count paradigm that reformulates the counting problem into a point detection process. 
As shown in Table 3a, Perception-R1 achieves remarkable counting performance, surpassing the current strong baselines by a substantial margin (17.9% improvement compared to Qwen2-VL in Pixmo val set). This advancement substantiates that RL effectively stimulates models to explore intrinsic visual logic mechanisms (Although counting yields deterministic results, the sequence of counting can exhibit distinct patterns.), thereby enhancing their capacity to resolve complex vision tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": "General Object Detection, widely regarded as the crown jewel of computer vision tasks, has long been considered one of the most challenging problems in visual perception. As a pioneering endeavor to integrate RL into object detection, Perception-R1 achieves a groundbreaking milestone, serving as the first pure MLLM to surpass the " + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "inline_equation", + "content": "30+" + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": " AP threshold, i.e., 31.9 AP in Table 3b, on the COCO 2017 val set, matching or even exceeding the performance of specialized expert models. This achievement underscores rule-based RL's immense potential in addressing complex vision tasks requiring sophisticated visual-logic integration." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 123, + 69, + 486, + 173 + ], + "blocks": [ + { + "bbox": [ + 123, + 69, + 486, + 173 + ], + "lines": [ + { + "bbox": [ + 123, + 69, + 486, + 173 + ], + "spans": [ + { + "bbox": [ + 123, + 69, + 486, + 173 + ], + "type": "table", + "html": "
caseVisual GroundingOCR PageOCRVisual CountingDetection COCO2017
RefCOCORefCOCO+RefCOCOgPixmovalPixmotest
Perception-R189.181.785.798.478.175.631.9
w/o reward matching----77.175.423.5
w/o RL86.877.183.394.460.250.516.1
w thinking75.167.971.377.374.972.825.7
w/o thinking89.181.785.795.778.175.628.1
RL only89.181.785.795.778.175.631.9
SFT only88.280.784.695.358.059.925.9
SFT+RL88.480.785.197.377.175.430.8
", + "image_path": "b7035c312ed2b32280d4a48e901452b0da27d5e28e8481473c4d5374ca58c5bf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 106, + 231, + 504, + 311 + ], + "blocks": [ + { + "bbox": [ + 104, + 175, + 504, + 229 + ], + "lines": [ + { + "bbox": [ + 104, + 175, + 504, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 504, + 229 + ], + "type": "text", + "content": "Table 5: Ablation Study of Perception-R1. We perform ablation studies to investigate key properties of Perception-R1 across a range of visual perception tasks. Specifically, we report the Acc@0.5 for RefCOCO / + / g val set, the F1-score for PageOCR, the average scores for Pixmo-Count, and the AP metric for COCO2017 val set. w/o means without. Notably, there is no reward matching applied to visual grounding and OCR tasks, as these tasks do not involve the multi-subject reward." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 231, + 504, + 311 + ], + "lines": [ + { + "bbox": [ + 106, + 231, + 504, + 311 + ], + "spans": [ + { + "bbox": [ + 106, + 231, + 504, + 311 + ], + "type": "table", + "html": "
reward functionCOCO2017
AP\\( AP_{50} \\)\\( AP_{75} \\)
format reward---
format reward + location reward (IoU)18.825.320.1
format reward + location reward (IoU) + cls reward20.227.321.4
format reward + location reward (IoU) + cls reward + recall reward (F1)27.642.028.7
format reward + location reward (IoU) + cls reward + recall reward (F1) + missing reward28.142.029.6
", + "image_path": "17e909db05a21591ec24e42bc0b1d2eb629c10109a7cd0731914d981373ef4e4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 313, + 504, + 346 + ], + "lines": [ + { + "bbox": [ + 104, + 313, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 313, + 504, + 346 + ], + "type": "text", + "content": "Table 6: Reward design analysis of Perception-R1. pls reward indicates binary classification reward and missing reward is a penalty to penalize missed detections. To facilitate rapid experimentation, we randomly sampled 10k data from COCO2017 train set for this experiment." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 352, + 506, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 506, + 407 + ], + "type": "text", + "content": "General Visual Comprehension extends beyond pure perceptual tasks, and we evaluate Perception-R1 on multiple multimodal benchmarks. As shown in Table 4, we observe an intriguing phenomenon that models trained with RL for vision-specific tasks, e.g., counting task, exhibit concurrent performance gains in generic comprehension benchmarks. We attribute this cross-task enhancement to the perception policy learning, which drives the model to discover superior image interpretation patterns." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 422, + 269, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 269, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 269, + 434 + ], + "type": "text", + "content": "5.2 Ablation Study of Perception-R1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 443, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 443, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 506, + 477 + ], + "type": "text", + "content": "In this section, we aim to conduct a comprehensive ablation study to systematically investigate the contributions of critical components within Perception-R1. Experimental results are shown in Table 5. From the experimental results, we can derive three principal empirical findings:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 481, + 504, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 481, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 481, + 504, + 548 + ], + "type": "text", + "content": "Reward matching enhances the explorability of multi-subject visual perception. As evidenced by the comparative results between row 1 and 2 in Table 5, replacing the bipartite matching with sequential matching leads to substantial performance degradation in both visual counting and object detection task. This suggests that sequential matching constrains the RL exploration space. On the contrast, the bipartite matching mechanism provides more possibility in reward assignment, enabling the model to explore optimal visual perception patterns." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 552, + 506, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 506, + 629 + ], + "type": "text", + "content": "Explicit thinking processes prove non-essential for contemporary visual perception. Comparative analysis of row 3 and 4 reveals consistent performance degradation across all four evaluated perception tasks when incorporating an explicit thinking process during both training and inference phases. Similar phenomenon also emerges in image classification tasks [30]. We posit that this phenomenon arises because current visual perception tasks are more oriented toward visual logic rather than semantic logic. This shift implies that explicit language-centered reasoning processes are unnecessary, as models tend to focus more on learning implicit visual patterns." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": "Perceptual perplexity determines RL superiority over SFT. We compare the different combinations of post-training method, i.e., SFT, RL, and " + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathrm{SFT + RL}" + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": ", across four perception tasks, as shown in row 6, 7, 8 of Table 5. In tasks with high perceptual perplexity, such as counting and detection (multiple objects and categories), RL demonstrates superior performance enhancement compared to SFT or even " + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathrm{SFT + RL}" + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": ". 
Conversely, in low-perplexity tasks such as grounding and OCR, RL underperforms relative to SFT or " + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathrm{SFT + RL}" + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": ". This indicates that high perceptual perplexity a significant factor influencing the effectiveness of RL. It suggests that RL techniques should be applied to tasks with greater perceptual perplexity, where the exploration space for perception policy is larger." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 70, + 203, + 162 + ], + "blocks": [ + { + "bbox": [ + 111, + 70, + 203, + 162 + ], + "lines": [ + { + "bbox": [ + 111, + 70, + 203, + 162 + ], + "spans": [ + { + "bbox": [ + 111, + 70, + 203, + 162 + ], + "type": "image", + "image_path": "3053b8e6241b40acdbecf83ae363a7f83ebec9ed84048e20cc6e311d938803bd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 116, + 167, + 197, + 178 + ], + "lines": [ + { + "bbox": [ + 116, + 167, + 197, + 178 + ], + "spans": [ + { + "bbox": [ + 116, + 167, + 197, + 178 + ], + "type": "text", + "content": "(a) Grounding reward" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 209, + 70, + 303, + 161 + ], + "blocks": [ + { + "bbox": [ + 209, + 70, + 303, + 161 + ], + "lines": [ + { + "bbox": [ + 209, + 70, + 303, + 161 + ], + "spans": [ + { + "bbox": [ + 209, + 70, + 303, + 161 + ], + "type": 
"image", + "image_path": "f92468a49b9c1c8ca4893b7a61fdc713f5cfb6c948a614ddf22f1ec900d85de3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 205, + 167, + 306, + 178 + ], + "lines": [ + { + "bbox": [ + 205, + 167, + 306, + 178 + ], + "spans": [ + { + "bbox": [ + 205, + 167, + 306, + 178 + ], + "type": "text", + "content": "(b) Grounding performance" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 309, + 71, + 399, + 162 + ], + "blocks": [ + { + "bbox": [ + 309, + 71, + 399, + 162 + ], + "lines": [ + { + "bbox": [ + 309, + 71, + 399, + 162 + ], + "spans": [ + { + "bbox": [ + 309, + 71, + 399, + 162 + ], + "type": "image", + "image_path": "aee8ba28fe17acb09471504630258562d6034bed74d6104b513bf8b4ff85240c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 317, + 167, + 392, + 178 + ], + "lines": [ + { + "bbox": [ + 317, + 167, + 392, + 178 + ], + "spans": [ + { + "bbox": [ + 317, + 167, + 392, + 178 + ], + "type": "text", + "content": "(c) Counting reward" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 180, + 504, + 225 + ], + "lines": [ + { + "bbox": [ + 104, + 180, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 104, + 180, + 504, + 225 + ], + "type": "text", + "content": "Figure 2: Scalability analysis of Perception-R1. We select two primary tasks: grounding and counting. We visualize the training reward curves under varying numbers of rollouts and evaluate the final performance of each task. All experiments are conducted with " + }, + { + "bbox": [ + 104, + 180, + 504, + 225 + ], + "type": "inline_equation", + "content": "5k" + }, + { + "bbox": [ + 104, + 180, + 504, + 225 + ], + "type": "text", + "content": " sampled data. 
And the default rollout number setting " + }, + { + "bbox": [ + 104, + 180, + 504, + 225 + ], + "type": "inline_equation", + "content": "(1\\times)" + }, + { + "bbox": [ + 104, + 180, + 504, + 225 + ], + "type": "text", + "content": " is 8." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 406, + 71, + 499, + 161 + ], + "blocks": [ + { + "bbox": [ + 406, + 71, + 499, + 161 + ], + "lines": [ + { + "bbox": [ + 406, + 71, + 499, + 161 + ], + "spans": [ + { + "bbox": [ + 406, + 71, + 499, + 161 + ], + "type": "image", + "image_path": "346f636339e1da4901dd34bcfb41c75b7043da12f645cc487144f19a5e0a4dec.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 167, + 501, + 178 + ], + "lines": [ + { + "bbox": [ + 405, + 167, + 501, + 178 + ], + "spans": [ + { + "bbox": [ + 405, + 167, + 501, + 178 + ], + "type": "text", + "content": "(d) Counting performance" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 242, + 232, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 232, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 232, + 254 + ], + "type": "text", + "content": "5.3 More In-depth Analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 266, + 505, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 266, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 505, + 289 + ], + "type": "text", + "content": "In this section, we explore several key properties of Perception-R1 to further enhance our understanding of Perception Policy Learning with RL." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 293, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 392 + ], + "type": "text", + "content": "Analysis of reward design for perception policy learning. We introduced the details of reward function of Perception-R1 in § 4.3. In this part, we examine the influence of these reward functions on perception policy learning. Specifically, using object detection as a case study, we incrementally integrate the designed answer reward into the format reward, as illustrated in Table 6. The results indicate that the progressive introduction of refined reward functions leads to consistent improvements in detection performance, ultimately exceeding the performance of expert models. This underscores the critical role of reward design in perception policy learning. Furthermore, it identifies a promising avenue for future research: the development of more refined and task-specific reward functions to enhance perception policy learning." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 397, + 504, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 496 + ], + "type": "text", + "content": "Analysis of scaling up rollout for perception policy learning. The scalability of RL is a key concern of existing LLM post-training. In this part, we analyze the scalability of Perception-R1, focusing specifically on scaling up the number of rollouts. As shown in Figure 2, we conduct rollout-scaling experiments in two tasks: visual grounding and visual counting. The results indicate that increasing rollout count enhances reward optimization and final performance. This demonstrates Perception-R1's strong scaling properties and underscores the critical role of rollout quantity in scaling perception policies. 
By generating sufficient rollouts, the model broadens its exploration space, increasing the diversity of candidate solutions for reward evaluation. This expansion accelerates convergence to optimal visual perception patterns." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 521, + 263, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 263, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 263, + 534 + ], + "type": "text", + "content": "6 Limitation and Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 552, + 506, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 506, + 641 + ], + "type": "text", + "content": "\"What can RL bring to MLLM?\" is a public question since the propose of DeepSeek-R1. Several latest works attempt to apply RL from the perspective of language-centric visual reasoning [39, 15, 41]. However, in this paper, we take a different pathway and argue that perception is a crucial prerequisite for visual reasoning. Only by fully unlocking the perception patterns of MLLMs can the models possess the ability to reason about complex visual tasks. Nevertheless, we regrettably find that many current perception tasks are overly simplistic, which limits the exploration space for RL. This, in turn, restricts the possibility of MLLMs achieving a perceptual \"Aha moment\" through thinking process. Finding more appropriate perception tasks, aka., meta task, may be the key to addressing this issue." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "content": "In a summary, this work takes a pioneering step in exploring the potential of rule-based RL in MLLM post-training for perception policy learning. Through extensive experimental analysis, we establish several valuable cognition about perception policy learning with RL. Driven by these findings, we build Perception-R1, a simple, effective, and scalable RL framework for efficient perception policy learning. Perception-R1 sets new SoTAs across multiple visual perception tasks, particularly in object detection tasks. By introducing a novel paradigm, it achieves and even surpasses the performance of expert models, thereby demonstrating the significant potential of perception policy learning." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 83 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 89, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 111, + 89, + 505, + 113 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 89, + 505, + 113 + ], + "spans": [ + { + "bbox": [ + 111, + 89, + 505, + 113 + ], + "type": "text", + 
"content": "[1] Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 119, + 504, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 119, + 504, + 152 + ], + "spans": [ + { + "bbox": [ + 111, + 119, + 504, + 152 + ], + "type": "text", + "content": "[2] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 159, + 505, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 159, + 505, + 193 + ], + "spans": [ + { + "bbox": [ + 111, + 159, + 505, + 193 + ], + "type": "text", + "content": "[3] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 200, + 504, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 200, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 111, + 200, + 504, + 224 + ], + "type": "text", + "content": "[4] Lukas Blecher, Guillem Cucurull, Thomas Scialom, and Robert Stojnic. Nougat: Neural optical understanding for academic documents. arXiv preprint arXiv:2308.13418, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 230, + 506, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 230, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 111, + 230, + 506, + 274 + ], + "type": "text", + "content": "[5] Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski, Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, et al. Rt-2: Vision-language-action models transfer web knowledge to robotic control. arXiv preprint arXiv:2307.15818, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 281, + 504, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 281, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 111, + 281, + 504, + 316 + ], + "type": "text", + "content": "[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 322, + 504, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 322, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 111, + 322, + 504, + 378 + ], + "type": "text", + "content": "[7] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, Zheng Zhang, Dazhi Cheng, Chenchen Zhu, Tianheng Cheng, Qijie Zhao, Buyu Li, Xin Lu, Rui Zhu, Yue Wu, Jifeng Dai, Jingdong Wang, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. MMDetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 384, + 506, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 384, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 111, + 384, + 506, + 418 + ], + "type": "text", + "content": "[8] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 425, + 504, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 425, + 504, + 459 + ], + "spans": [ + { + "bbox": [ + 111, + 425, + 504, + 459 + ], + "type": "text", + "content": "[9] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 465, + 506, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 465, + 506, + 499 + ], + "spans": [ + { + "bbox": [ + 106, + 465, + 506, + 499 + ], + "type": "text", + "content": "[10] Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 506, + 504, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 506, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 504, + 550 + ], + "type": "text", + "content": "[11] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale N Fung, and Steven Hoi. 
Instructclip: Towards general-purpose vision-language models with instruction tuning. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 557, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 557, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 557, + 506, + 723 + ], + "type": "text", + "content": "[12] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiiping Yu, Shunfeng Zhou, Shuting Pan, S. S. 
Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 126, + 72, + 506, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 72, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 126, + 72, + 506, + 236 + ], + "type": "text", + "content": "T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yaohui Li, Yi Zheng, Yuchen Zhu, Yunxian Ma, Ying Tang, Yukun Zha, Yuting Yan, Z. Z. 
Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhengyan Zhang, Zhewen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, Zilin Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 244, + 505, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 244, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 244, + 505, + 289 + ], + "type": "text", + "content": "[13] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 297, + 505, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 505, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 505, + 342 + ], + "type": "text", + "content": "[14] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, Xiangwen Kong, Xiangyu Zhang, Kaisheng Ma, and Li Yi. DreamLLM: Synergistic multimodal comprehension and creation. In The Twelfth International Conference on Learning Representations, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 349, + 504, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 349, + 504, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 349, + 504, + 383 + ], + "type": "text", + "content": "[15] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 390, + 505, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 505, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 505, + 434 + ], + "type": "text", + "content": "[16] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Zhenyu Qiu, Wei Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 443, + 504, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 443, + 504, + 466 + ], + "spans": [ + { + "bbox": [ + 107, + 443, + 504, + 466 + ], + "type": "text", + "content": "[17] Zitian Gao, Boye Niu, Xuzheng He, Haotian Xu, Hongzhang Liu, Aiwei Liu, Xuming Hu, and Lijie Wen. Interpretable contrastive monte carlo tree search reasoning, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 473, + 504, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 473, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 107, + 473, + 504, + 496 + ], + "type": "text", + "content": "[18] Yuying Ge, Sijie Zhao, Ziyun Zeng, Yixiao Ge, Chen Li, Xintao Wang, and Ying Shan. Making llama see and draw with seed tokenizer. arXiv preprint arXiv:2310.01218, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 503, + 245, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 503, + 245, + 516 + ], + "spans": [ + { + "bbox": [ + 107, + 503, + 245, + 516 + ], + "type": "text", + "content": "[19] GPT-4o. Hello gpt-4o, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 523, + 506, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 523, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 107, + 523, + 506, + 546 + ], + "type": "text", + "content": "[20] Kaiming He, Georgia Gkioxari, Piotr Dólár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 553, + 506, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 553, + 506, + 587 + ], + "spans": [ + { + "bbox": [ + 107, + 553, + 506, + 587 + ], + "type": "text", + "content": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 594, + 506, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 594, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 107, + 594, + 506, + 639 + ], + "type": "text", + "content": "[22] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, et al. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14281-14290, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 647, + 504, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 647, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 107, + 647, + 504, + 681 + ], + "type": "text", + "content": "[23] Anwen Hu, Haiyang Xu, Jiabo Ye, Ming Yan, Liang Zhang, Bo Zhang, Chen Li, Ji Zhang, Qin Jin, Fei Huang, et al. mplug-docowl 1.5: Unified structure learning forocr-free document understanding. 
arXiv preprint arXiv:2403.12895, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "text", + "content": "[24] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, 2025." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[25] Aishwarya Kamath, Mannat Singh, Yann LeCun, Gabriel Synnaeve, Ishan Misra, and Nicolas Carion. Mdetr-modulated detection for end-to-end multi-modal understanding. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1780-1790, 2021." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 505, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 505, + 159 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 505, + 159 + ], + "type": "text", + "content": "[26] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part IV 14, pages 235-251. Springer, 2016." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 165, + 504, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 165, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 107, + 165, + 504, + 190 + ], + "type": "text", + "content": "[27] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97, 1955." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 194, + 504, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 504, + 229 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 504, + 229 + ], + "type": "text", + "content": "[28] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 236, + 504, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 236, + 504, + 261 + ], + "spans": [ + { + "bbox": [ + 107, + 236, + 504, + 261 + ], + "type": "text", + "content": "[29] Jinyang Li, En Yu, Sijia Chen, and Wenbing Tao. Ovtr: End-to-end open-vocabulary multiple object tracking with transformer. arXiv preprint arXiv:2503.10616, 2025." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 267, + 506, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 267, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 107, + 267, + 506, + 291 + ], + "type": "text", + "content": "[30] Ming Li, Shitian Zhao, Jike Zhong, Yuxiang Lai, and Kaipeng Zhang. Cls-rl: Image classification with rule-based reinforcement learning. arXiv preprint arXiv:2503.16188, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 297, + 504, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 297, + 504, + 322 + ], + "spans": [ + { + "bbox": [ + 107, + 297, + 504, + 322 + ], + "type": "text", + "content": "[31] Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 327, + 506, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 327, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 506, + 373 + ], + "type": "text", + "content": "[32] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 380, + 506, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 380, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 107, + 380, + 506, + 414 + ], + "type": "text", + "content": "[33] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 421, + 506, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 421, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 107, + 421, + 506, + 456 + ], + "type": "text", + "content": "[34] Chenglong Liu, Haoran Wei, Jinyue Chen, Lingyu Kong, Zheng Ge, Zining Zhu, Liang Zhao, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Focus anywhere for fine-grained multi-page document understanding. arXiv preprint arXiv:2405.14295, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 462, + 504, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 462, + 504, + 497 + ], + "spans": [ + { + "bbox": [ + 107, + 462, + 504, + 497 + ], + "type": "text", + "content": "[35] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 503, + 506, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 503, + 506, + 528 + ], + "spans": [ + { + "bbox": [ + 107, + 503, + 506, + 528 + ], + "type": "text", + "content": "[36] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, January 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 533, + 504, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 533, + 504, + 558 + ], + "spans": [ + { + "bbox": [ + 107, + 533, + 504, + 558 + ], + "type": "text", + "content": "[37] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 564, + 506, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 564, + 506, + 599 + ], + "spans": [ + { + "bbox": [ + 107, + 564, + 506, + 599 + ], + "type": "text", + "content": "[38] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 605, + 506, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 605, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 107, + 605, + 506, + 639 + ], + "type": "text", + "content": "[39] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 647, + 504, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 647, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 107, + 647, + 504, + 681 + ], + "type": "text", + "content": "[40] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11–20, 2016." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 688, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 688, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 688, + 504, + 723 + ], + "type": "text", + "content": "[41] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. 
Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[42] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 396, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 396, + 125 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 396, + 125 + ], + "type": "text", + "content": "[43] OpenAI. Chatgpt. https://openai.com/blog/chatgpt, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 130, + 422, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 130, + 422, + 143 + ], + "spans": [ + { + "bbox": [ + 107, + 130, + 422, + 143 + ], + "type": "text", + "content": "[44] OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 148, + 355, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 148, + 355, + 161 + ], + "spans": [ + { + "bbox": [ + 107, + 148, + 355, + 161 + ], + "type": "text", + "content": "[45] OpenAI. Learning to reason with llms, September 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 167, + 506, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 167, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 167, + 506, + 211 + ], + "type": "text", + "content": "[46] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 217, + 506, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 217, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 106, + 217, + 506, + 262 + ], + "type": "text", + "content": "[47] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35:27730-27744, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 269, + 504, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 269, + 504, + 303 + ], + "spans": [ + { + "bbox": [ + 106, + 269, + 504, + 303 + ], + "type": "text", + "content": "[48] Yuang Peng, Yuxin Cui, Haomiao Tang, Zekun Qi, Runpei Dong, Jing Bai, Chunrui Han, Zheng Ge, Xiangyu Zhang, and Shu-Tao Xia. Dreambench++: A human-aligned benchmark for personalized image generation. arXiv preprint arXiv:2406.16855, 2024." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 308, + 506, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 308, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 506, + 354 + ], + "type": "text", + "content": "[49] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 360, + 506, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 360, + 506, + 394 + ], + "spans": [ + { + "bbox": [ + 107, + 360, + 506, + 394 + ], + "type": "text", + "content": "[50] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 400, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 400, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 107, + 400, + 504, + 422 + ], + "type": "text", + "content": "[51] Joseph Redmon and Ali Farhadi. Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767, 2018." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 429, + 506, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 429, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 107, + 429, + 506, + 463 + ], + "type": "text", + "content": "[52] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE transactions on pattern analysis and machine intelligence, 39(6):1137-1149, 2016." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 469, + 506, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 469, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 107, + 469, + 506, + 503 + ], + "type": "text", + "content": "[53] Tanik Saikh, Tirthankar Ghosal, Amish Mittal, Asif Ekbal, and Pushpak Bhattacharyya. Scienceqa: A novel resource for question answering on scholarly articles. International Journal on Digital Libraries, 23(3):289-301, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 510, + 504, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 510, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 107, + 510, + 504, + 533 + ], + "type": "text", + "content": "[54] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 539, + 506, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 539, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 107, + 539, + 506, + 573 + ], + "type": "text", + "content": "[55] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 578, + 506, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 578, + 506, + 602 + ], + "spans": [ + { + "bbox": [ + 107, + 578, + 506, + 602 + ], + "type": "text", + "content": "[56] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 608, + 506, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 608, + 506, + 642 + ], + "spans": [ + { + "bbox": [ + 107, + 608, + 506, + 642 + ], + "type": "text", + "content": "[57] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 647, + 506, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 647, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 107, + 647, + 506, + 682 + ], + "type": "text", + "content": "[58] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 688, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 688, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 107, + 688, + 506, + 722 + ], + "type": "text", + "content": "[59] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 719 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[60] Fei Wang, Wenxuan Zhou, James Y Huang, Nan Xu, Sheng Zhang, Hoifung Poon, and Muhao Chen. mdpo: Conditional preference optimization for multimodal large language models. arXiv preprint arXiv:2406.11839, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 504, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 504, + 147 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 504, + 147 + ], + "type": "text", + "content": "[61] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 153, + 506, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 153, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 107, + 153, + 506, + 198 + ], + "type": "text", + "content": "[62] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. 
Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International conference on machine learning, pages 23318-23340. PMLR, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 205, + 506, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 506, + 249 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 506, + 249 + ], + "type": "text", + "content": "[63] Haoran Wei, Lingyu Kong, Jinyue Chen, Liang Zhao, Zheng Ge, Jinrong Yang, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Vary: Scaling up the vision vocabulary for large vision-language model. In European Conference on Computer Vision, pages 408-424. Springer, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 256, + 506, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 256, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 107, + 256, + 506, + 291 + ], + "type": "text", + "content": "[64] Haoran Wei, Lingyu Kong, Jinyue Chen, Liang Zhao, Zheng Ge, En Yu, Jianjian Sun, Chunrui Han, and Xiangyu Zhang. Small language model meets with reinforced vision vocabulary. arXiv preprint arXiv:2401.12503, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 297, + 506, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 297, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 107, + 297, + 506, + 331 + ], + "type": "text", + "content": "[65] Haoran Wei, Chenglong Liu, Jinyue Chen, Jia Wang, Lingyu Kong, Yanming Xu, Zheng Ge, Liang Zhao, Jianjian Sun, Yuang Peng, et al. GeneralOCR theory: TowardsOCR-2.0 via a unified end-to-end model. arXiv preprint arXiv:2409.01704, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 338, + 506, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 338, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 107, + 338, + 506, + 372 + ], + "type": "text", + "content": "[66] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 378, + 506, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 378, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 107, + 378, + 506, + 423 + ], + "type": "text", + "content": "[67] Huajian Xin, Z. Z. Ren, Junxiao Song, Zhihong Shao, Wanjia Zhao, Haocheng Wang, Bo Liu, Liyue Zhang, Xuan Lu, Qiushi Du, Wenjun Gao, Qihao Zhu, Dejian Yang, Zhibin Gou, Z. F. Wu, Fuli Luo, and Chong Ruan. Deepseek-prover-v1.5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 430, + 504, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 430, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 107, + 430, + 504, + 464 + ], + "type": "text", + "content": "[68] En Yu, Kangheng Lin, Liang Zhao, Yana Wei, Zining Zhu, Haoran Wei, Jianjian Sun, Zheng Ge, Xiangyu Zhang, Jingyu Wang, et al. Unhackable temporal rewarding for scalable video mllms. arXiv preprint arXiv:2502.12081, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "text", + "content": "[69] En Yu, Tiancai Wang, Zhuoling Li, Yang Zhang, Xiangyu Zhang, and Wenbing Tao. 
Motrv3: Releasefetch supervision for end-to-end multi-object tracking. arXiv preprint arXiv:2305.14298, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 511, + 504, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 511, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 107, + 511, + 504, + 545 + ], + "type": "text", + "content": "[70] En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 552, + 506, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 552, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 107, + 552, + 506, + 597 + ], + "type": "text", + "content": "[71] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 69-85. Springer, 2016." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 603, + 506, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 603, + 506, + 638 + ], + "spans": [ + { + "bbox": [ + 107, + 603, + 506, + 638 + ], + "type": "text", + "content": "[72] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 644, + 504, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 644, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 107, + 644, + 504, + 679 + ], + "type": "text", + "content": "[73] Liang Zhao, En Yu, Zheng Ge, Jinrong Yang, Haoran Wei, Hongyu Zhou, Jianjian Sun, Huang Peng, Runpei Dong, Chunrui Han, et al. Chatspot: Bootstrapping multimodal llms via precise referring instruction tuning. arXiv preprint arXiv:2307.09474, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 685, + 504, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 685, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 107, + 685, + 504, + 719 + ], + "type": "text", + "content": "[74] Zining Zhu, Liang Zhao, Kangheng Lin, Jinze Yang, En Yu, Chenglong Liu, Haoran Wei, Jianjian Sun, Zheng Ge, and Xiangyu Zhang. Perpo: Perceptual preference optimization via discriminative rewarding. arXiv preprint arXiv:2502.04371, 2025." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 179, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 179, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 179, + 85 + ], + "type": "text", + "content": "A Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 504, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 504, + 129 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 504, + 129 + ], + "type": "text", + "content": "In this appendix, we provide additional details about Perception-R1, which are omitted due to the 9-page limit of the main paper. Specifically, Section A.1 elaborates on the detailed dataset and training settings. Section A.2 presents more experimental results." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 142, + 331, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 142, + 331, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 331, + 154 + ], + "type": "text", + "content": "A.1 Additional Details about Experimental Setting" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 162, + 504, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 162, + 504, + 195 + ], + "spans": [ + { + "bbox": [ + 104, + 162, + 504, + 195 + ], + "type": "text", + "content": "More detailed dataset information of Perception-R1. In Section 4.3, we introduced what data was used for RL post-training of Perception-R1 on which tasks. 
In this part, we will provide more detailed information about the datasets, as shown in Table 7." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 149, + 204, + 459, + 273 + ], + "blocks": [ + { + "bbox": [ + 149, + 204, + 459, + 273 + ], + "lines": [ + { + "bbox": [ + 149, + 204, + 459, + 273 + ], + "spans": [ + { + "bbox": [ + 149, + 204, + 459, + 273 + ], + "type": "table", + "html": "
tasksdatasetsOriginalUsedRatio
visual groundingRefCOCO / RefCOCO+ / RefCOCOg320k5k1.56%
OCRPageOCR50k5k10%
visual countingPixMo-Count1.9M10k0.5%
object detectionCOCO2017110k110k100%
overall-2.38M130k-
", + "image_path": "98895dacdebc846941fa4240b973e0e77e41234c273900b597e8f824a43d6b97.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 312, + 504, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 346 + ], + "type": "text", + "content": "More detailed training setting information of Perception-R1. Section 4.3 elaborates on several key parameters of Perception-R1. In this part, we further demonstrate the diverse prompts employed for distinct perception tasks, as shown in Table 8." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 106, + 356, + 504, + 415 + ], + "blocks": [ + { + "bbox": [ + 104, + 277, + 504, + 301 + ], + "lines": [ + { + "bbox": [ + 104, + 277, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 504, + 301 + ], + "type": "text", + "content": "Table 7: Training dataset statistics. Notably, we do not mix the data from different perception tasks for joint training because the rewards for different tasks vary." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 356, + 504, + 415 + ], + "lines": [ + { + "bbox": [ + 106, + 356, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 106, + 356, + 504, + 415 + ], + "type": "table", + "html": "
taskssystem promptuser prompt
visual groundingQwen2-VLOutput the bounding box of the {question} in the image.
OCRQwen2-VLOCR this image.
visual countingQwen2-VLOutput all the bounding boxes of the {label}
object detectionQwen2.5-VLPlease output bbox coordinates and names of {90 categories of COCO}.
", + "image_path": "60e4f5a6e447288126a3a996a74edce8999b36c1645b0807a13af873d56e9b91.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 418, + 504, + 443 + ], + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 443 + ], + "type": "text", + "content": "Table 8: Prompts of Perception-R1. The system prompt of Perception-R1 follows Qwen2-VL [61] and Qwen2.5-VL [3]." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 460, + 272, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 272, + 473 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 272, + 473 + ], + "type": "text", + "content": "A.2 Additional Experimental Results" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 481, + 504, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 481, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 481, + 504, + 503 + ], + "type": "text", + "content": "In this section, we provide more qualitative analysis of Perception-R1 on multiple visual perception tasks. The selected cases are shown in Figure 3-6." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 128, + 515, + 482, + 677 + ], + "blocks": [ + { + "bbox": [ + 128, + 515, + 482, + 677 + ], + "lines": [ + { + "bbox": [ + 128, + 515, + 482, + 677 + ], + "spans": [ + { + "bbox": [ + 128, + 515, + 482, + 677 + ], + "type": "image", + "image_path": "0462c8ec286d5c7d395e7cbc94895fcdc812f7bea13f6f8a53be3af0b8a4702d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 689, + 434, + 700 + ], + "lines": [ + { + "bbox": [ + 175, + 689, + 434, + 700 + ], + "spans": [ + { + "bbox": [ + 175, + 689, + 434, + 700 + ], + "type": "text", + "content": "Figure 3: Demo case of Percpetion-R1 on visual counting task." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 149, + 125, + 189, + 132 + ], + "blocks": [ + { + "bbox": [ + 111, + 121, + 134, + 130 + ], + "lines": [ + { + "bbox": [ + 111, + 121, + 134, + 130 + ], + "spans": [ + { + "bbox": [ + 111, + 121, + 134, + 130 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 149, + 125, + 189, + 132 + ], + "lines": [ + { + "bbox": [ + 149, + 125, + 189, + 132 + ], + "spans": [ + { + "bbox": [ + 149, + 125, + 189, + 132 + ], + "type": "image", + "image_path": "66e3dc80cff0f2751ef3431afc35a24985f5ee06c3e675f05e4e3abbf56e3f18.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 208, + 133, + 308, + 193 + ], + "blocks": [ + { + "bbox": [ + 208, + 133, + 308, + 193 + ], + "lines": [ + { + "bbox": [ + 208, + 133, + 308, + 193 + ], + "spans": [ + { + "bbox": [ + 208, + 133, + 308, + 193 + ], + "type": "image", + "image_path": "17ad0191a60a3dd9a54083a097320ffdc99ee524bda739999b0c69e290bafa00.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 663, + 413, + 675 + ], + "lines": [ + { + "bbox": [ + 195, + 663, + 413, + 675 + ], + "spans": [ + { + "bbox": [ + 195, + 663, + 413, + 675 + ], + "type": "text", + "content": "Figure 4: Demo case of Percpetion-R1 on OCR task." 
+ } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 149, + 144, + 184, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 144, + 184, + 163 + ], + "spans": [ + { + "bbox": [ + 149, + 144, + 184, + 163 + ], + "type": "text", + "content": "Riding Dirty" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 149, + 165, + 190, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 165, + 190, + 183 + ], + "spans": [ + { + "bbox": [ + 149, + 165, + 190, + 183 + ], + "type": "text", + "content": "A muddy mix of road \nThe wind is a bit \nCyclocross doubles the \nthrill of both sports. Here's \nthe gear to get you started." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 150, + 186, + 176, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 186, + 176, + 190 + ], + "spans": [ + { + "bbox": [ + 150, + 186, + 176, + 190 + ], + "type": "text", + "content": "by BERNSTEIN 100VY" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 149, + 193, + 185, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 193, + 185, + 236 + ], + "spans": [ + { + "bbox": [ + 149, + 193, + 185, + 236 + ], + "type": "text", + "content": "Cyclosis is a cool term. It means to drop-hotelize bikes with little or no need to walk. It is a course that often includes a variety of exercises, such as as well as obstacles that force you to get your legs on the ground. A bike over your shoulder. \"All you need is a bike and have a good attitude and confidence in your ability to walk,\" says Stu Thorne, founder and CEO of the professional cyclosis team." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 214, + 198, + 252, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 198, + 252, + 203 + ], + "spans": [ + { + "bbox": [ + 214, + 198, + 252, + 203 + ], + "type": "text", + "content": "BEST PCHENTY LEVEL" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 216, + 204, + 302, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 204, + 302, + 221 + ], + "spans": [ + { + "bbox": [ + 216, + 204, + 302, + 221 + ], + "type": "text", + "content": "What you canificn with extra weight you make up for with a highly capable automobile frame that you'll want to keep rolling between trips. A carbon-bond tire helps eat and move the vehicle. The car also has a high quality底盘, an excellent底盘, and a 100%底盘 driven power through any grade. And TBP cable disc brakes perform well even when the vehicle is on the road. The steering system is also very useful to consider if you're looking for a bike that can travel all way." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 149, + 225, + 261, + 285 + ], + "blocks": [ + { + "bbox": [ + 149, + 225, + 261, + 285 + ], + "lines": [ + { + "bbox": [ + 149, + 225, + 261, + 285 + ], + "spans": [ + { + "bbox": [ + 149, + 225, + 261, + 285 + ], + "type": "image", + "image_path": "89a957bd1bb76655d3273172dac178614e0fa382484fd578c9fe8b83a2e59d2c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 263, + 225, + 304, + 243 + ], + "blocks": [ + { + "bbox": [ + 263, + 225, + 304, + 243 + ], + "lines": [ + { + "bbox": [ + 263, + 225, + 304, + 243 + ], + "spans": [ + { + "bbox": [ + 263, + 225, + 304, + 243 + ], + "type": "image", + "image_path": "579e0f2a185ee16ddbe4e2166f6260c9afac14b0b8d60d1d2a4c35e5ee16becc.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 263, + 243, + 304, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 243, + 304, + 284 + ], + "spans": [ + { + "bbox": [ + 263, + 243, + 304, + 284 + ], + "type": "text", + "content": "A premium neck bike, this should be your next choice. The Carbone Factory offers a 2000 inch, 16-in. front bottom bracket and relatively easy to install. It's also suitable for hard, solid, hard or soft through cracks. This is the best way to get one of these items. They can be run tubes to better make it easier to use. They mean they spring freely when called on to do so. 
They are lightweight and fast- and something you won't have to deal with in special area $3,000" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 149, + 292, + 178, + 319 + ], + "blocks": [ + { + "bbox": [ + 149, + 292, + 178, + 319 + ], + "lines": [ + { + "bbox": [ + 149, + 292, + 178, + 319 + ], + "spans": [ + { + "bbox": [ + 149, + 292, + 178, + 319 + ], + "type": "image", + "image_path": "228803a2141e32fd5ffd82f71c5eb34605ec16fae536c99ed74406455884046c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 149, + 321, + 175, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 321, + 175, + 350 + ], + "spans": [ + { + "bbox": [ + 149, + 321, + 175, + 350 + ], + "type": "text", + "content": "Craft Shield Glove \nGlove \ngloves \ngloves from sailor \ngloves from sailor \ngloves from sea \ngloves from sea \ngloves from sea \ngloves from sea \ngloves from sea \ngloves from sea" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 183, + 292, + 208, + 319 + ], + "blocks": [ + { + "bbox": [ + 183, + 292, + 208, + 319 + ], + "lines": [ + { + "bbox": [ + 183, + 292, + 208, + 319 + ], + "spans": [ + { + "bbox": [ + 183, + 292, + 208, + 319 + ], + "type": "image", + "image_path": "7c25e4962ffe267435cfb24669bd2212ff8b74cf4982030382e10e32dcb6c53a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 182, + 321, + 206, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 321, + 206, + 350 + ], + "spans": [ + { + "bbox": [ + 182, + 321, + 206, + 350 + ], + "type": "text", + "content": "Darn Tough Microer Crews can be a great way to complete your cyclical routines you know. 
The best way to get the best, fight, strong and cool are to do so with the \"power of the soul.\"" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 211, + 293, + 242, + 320 + ], + "blocks": [ + { + "bbox": [ + 211, + 293, + 242, + 320 + ], + "lines": [ + { + "bbox": [ + 211, + 293, + 242, + 320 + ], + "spans": [ + { + "bbox": [ + 211, + 293, + 242, + 320 + ], + "type": "image", + "image_path": "0e15e41f95fe19e01d7b93c2eae7c047b8338378b3a8e6dcb098bdad66bfe059.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 212, + 321, + 256, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 321, + 256, + 350 + ], + "spans": [ + { + "bbox": [ + 212, + 321, + 256, + 350 + ], + "type": "text", + "content": "Park Tool Rapho Brush Set Leng W 10mm 2. This tool usually dirts the skin and acts as a light source for key components. This kit the teeth are used to clean your bony look cheek off. You would need to purchase part.com e600 regal.co.uk" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 244, + 292, + 269, + 320 + ], + "blocks": [ + { + "bbox": [ + 244, + 292, + 269, + 320 + ], + "lines": [ + { + "bbox": [ + 244, + 292, + 269, + 320 + ], + "spans": [ + { + "bbox": [ + 244, + 292, + 269, + 320 + ], + "type": "image", + "image_path": "cf3f379731aab4c9a3ee26872f83496ba2430ebac37aa5b35a001d16ebb1bfbc.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 244, + 321, + 272, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 321, + 272, + 350 + ], + "spans": [ + { + "bbox": [ + 244, + 321, + 272, + 350 + ], + "type": "text", + "content": "Rapha Arm and Leg Warmers \nA has a lot of Lycia for the warm season. \nThe warm weather up, etc. \nThe warm air is coming from the warm air. 
\nLycia from E70" + } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 276, + 292, + 304, + 320 + ], + "blocks": [ + { + "bbox": [ + 276, + 292, + 304, + 320 + ], + "lines": [ + { + "bbox": [ + 276, + 292, + 304, + 320 + ], + "spans": [ + { + "bbox": [ + 276, + 292, + 304, + 320 + ], + "type": "image", + "image_path": "01d85e92cf288989a366cb6be307e072ac35ef3cfdd208c1716ed73746c60ccc.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 276, + 321, + 304, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 321, + 304, + 350 + ], + "spans": [ + { + "bbox": [ + 276, + 321, + 304, + 350 + ], + "type": "text", + "content": "Topeak SmartGaUGE D2 1000000000000000000000000000000000000000000000000000000000000000" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 203, + 352, + 252, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 352, + 252, + 356 + ], + "spans": [ + { + "bbox": [ + 203, + 352, + 252, + 356 + ], + "type": "text", + "content": "MEN'S JOURNAL 94 NOVEMBER 2016" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 111, + 365, + 146, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 365, + 146, + 373 + ], + "spans": [ + { + "bbox": [ + 111, + 365, + 146, + 373 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 377, + 226, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 377, + 226, + 387 + ], + "spans": [ + { + "bbox": [ + 141, + 377, + 226, + 387 + ], + "type": "text", + "content": "OCR this image." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 111, + 396, + 165, + 405 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 396, + 165, + 405 + ], + "spans": [ + { + "bbox": [ + 111, + 396, + 165, + 405 + ], + "type": "text", + "content": "Perception-R1:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "spans": [ + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "text", + "content": " G E A R L A B\\nRiding Dirty\\n\\nA muddy mix of road- and mountain-bike racing, cyclocross doubles the thrill of both sports. Here's the gear to get you started.\\n\\nBryne Bredy\\n\\nCyclocross is as cool as it looks: Racers pedal drop-handlebikes with trail-ready tires through a course that often includes pavement, dirt, mud, and grass, as well as obstacles that force you to dismount and sling your bike over your shoulder. \"All you really need is to be tough and have a good attitude and a desire to suffer a little bit,\" says Stu Thorne, founder and director of the Cannondale professional cyclocross team. The right gear helps, too.\\nBest for Entry Level\\n\\nCAADX 105\\n\\nWhat you sacrifice with extra weight you make up for with a highly capable aluminum frame that'll keep you going between races. A carbon-blade fork helps eat up some of the pounding the courses can dish out. The dependable 2x11 speed Shimano 105 drivetrain powers through any grade, and TRP cable disc brakes perform well even in mud. 
Meanwhile, the 35cc tires and stable geometry make the CAADX a superfun commuter if you're looking for a bike that can multitask\\n\\nCannondale.com " + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "inline_equation", + "content": "\\mathbb{S}1,970\\backslash \\mathbb{n}\\backslash \\mathbb{n}" + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "text", + "content": "Best for Experienced Cyclists\\n\\nCruX Elite X1\\n\\nIf you already log long miles on a premium road bike, this should be your new cross rig. The carbon frame is as light as it is.\\n\\nLow bottom bracket and relatively slack head angle, which make it stable, helped us sail through rocks and roots and corner quickly. The tires can be run tubeless to better resist flats, and extra clearance means they spin freely when caked with mud. The CruX Elite is playful and fast — and something you won't outgrow as you collect medals.\\n\\nGlove\\n\\nThe cross season typically runs from September through February, so you'll need hearty gloves like these, with a fleece lining and a waterproof base, for warmth on wet race days. craftsports.us " + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "inline_equation", + "content": "\\mathbb{S}78\\backslash \\mathbb{n}\\backslash \\mathbb{N}" + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "text", + "content": "Darn Tough Micro Crew\\n\\nUnlike other bike races, cyclocross requires you to be on foot at times. So light, strong socks are key. These aren't likely to wear out, but Darn Tough will replace them if they do. darntough.com " + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "inline_equation", + "content": "\\mathbb{S}18\\backslash \\mathbb{n}\\backslash \\mathbb{N}" + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "text", + "content": "Park Tool Brush Set\\n\\nThe mud, dirt, and grime that builds up during off-road rides can damage key components. 
This kit does more than just keep your bike looking fresh; it keeps it healthy, too. parktool.com " + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "inline_equation", + "content": "\\mathbb{S}80\\backslash \\mathbb{n}\\backslash \\mathbb{n}" + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "text", + "content": "Rapha Arm and Leg Warmer\\n\\nThese merino layers, which have a bit of Lycra for stretch, peel off easily when the weather warms up. And they dry quickly, whether you sweat profusely or get caught in a sudden squall. rapha.cc From " + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "inline_equation", + "content": "\\mathbb{S}70\\backslash \\mathbb{n}\\backslash \\mathbb{N}" + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "text", + "content": "Topeak SmartGauge D2\\n\\nFor peak performance, adjust your tire pressure to suit the terrain. (On soft trails, lower pressure makes it grip better.) The SmartGauge makes it a snap with a fast, readable result. 
topeak.com " + }, + { + "bbox": [ + 136, + 411, + 499, + 631 + ], + "type": "inline_equation", + "content": "\\$ 40" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 152, + 123, + 299, + 223 + ], + "blocks": [ + { + "bbox": [ + 129, + 121, + 149, + 130 + ], + "lines": [ + { + "bbox": [ + 129, + 121, + 149, + 130 + ], + "spans": [ + { + "bbox": [ + 129, + 121, + 149, + 130 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 152, + 123, + 299, + 223 + ], + "lines": [ + { + "bbox": [ + 152, + 123, + 299, + 223 + ], + "spans": [ + { + "bbox": [ + 152, + 123, + 299, + 223 + ], + "type": "image", + "image_path": "dbb8a8c2c2bf6102daf300bc5ccdebd12c743a5f0b46758ef0732eed7771661c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 331, + 123, + 479, + 222 + ], + "blocks": [ + { + "bbox": [ + 303, + 123, + 328, + 131 + ], + "lines": [ + { + "bbox": [ + 303, + 123, + 328, + 131 + ], + "spans": [ + { + "bbox": [ + 303, + 123, + 328, + 131 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 331, + 123, + 479, + 222 + ], + "lines": [ + { + "bbox": [ + 331, + 123, + 479, + 222 + ], + "spans": [ + { + "bbox": [ + 331, + 123, + 479, + 222 + ], + "type": "image", + "image_path": "cd6340a67f4aa70711d979225ba22f60251b0956c62633a3ebc28b4e3e8649cf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + 
}, + { + "bbox": [ + 172, + 289, + 436, + 300 + ], + "lines": [ + { + "bbox": [ + 172, + 289, + 436, + 300 + ], + "spans": [ + { + "bbox": [ + 172, + 289, + 436, + 300 + ], + "type": "text", + "content": "Figure 5: Demo case of Percpetion-R1 on visual grounding task." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 129, + 226, + 160, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 226, + 160, + 235 + ], + "spans": [ + { + "bbox": [ + 129, + 226, + 160, + 235 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 156, + 237, + 382, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 237, + 382, + 246 + ], + "spans": [ + { + "bbox": [ + 156, + 237, + 382, + 246 + ], + "type": "text", + "content": "Output the bounding box of the woman holding a child in the image." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 129, + 254, + 176, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 254, + 176, + 262 + ], + "spans": [ + { + "bbox": [ + 129, + 254, + 176, + 262 + ], + "type": "text", + "content": "Perception-R1:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 156, + 265, + 296, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 265, + 296, + 274 + ], + "spans": [ + { + "bbox": [ + 156, + 265, + 296, + 274 + ], + "type": "text", + "content": "The woman holding a child(157,459), (208,622)" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 153, + 406, + 298, + 504 + ], + "blocks": [ + { + "bbox": [ + 130, + 406, + 150, + 415 + ], + "lines": [ + { + "bbox": [ + 130, + 406, + 150, + 415 + ], + "spans": [ + { + "bbox": [ + 130, + 406, + 150, + 415 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 153, + 406, + 298, + 504 + 
], + "lines": [ + { + "bbox": [ + 153, + 406, + 298, + 504 + ], + "spans": [ + { + "bbox": [ + 153, + 406, + 298, + 504 + ], + "type": "image", + "image_path": "e711809a55693e7d0cf74a6a3facccda9fe576ff5667b5ae1184515cb89ee65d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 649, + 504, + 673 + ], + "lines": [ + { + "bbox": [ + 104, + 649, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 649, + 504, + 673 + ], + "type": "text", + "content": "Figure 6: Demo case of Percpception-R1 on general object detection task. The color of bounding boxes correspond to the category." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 330, + 407, + 475, + 503 + ], + "blocks": [ + { + "bbox": [ + 302, + 407, + 327, + 415 + ], + "lines": [ + { + "bbox": [ + 302, + 407, + 327, + 415 + ], + "spans": [ + { + "bbox": [ + 302, + 407, + 327, + 415 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 330, + 407, + 475, + 503 + ], + "lines": [ + { + "bbox": [ + 330, + 407, + 475, + 503 + ], + "spans": [ + { + "bbox": [ + 330, + 407, + 475, + 503 + ], + "type": "image", + "image_path": "10d999c72f599129e248ca707edf35cc4f10c71f663e2cb1c44587b8cc6aec3a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 129, + 510, + 160, + 518 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 510, + 160, + 518 + ], + "spans": [ + { + "bbox": [ + 129, + 510, + 160, + 518 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 156, + 521, + 391, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 521, + 391, + 530 + ], + "spans": [ + { + "bbox": [ + 156, + 521, + 391, + 530 + ], + "type": "text", + "content": 
"Please output bbox coordinates and names of {90 categories of COCO}." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 537, + 177, + 545 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 537, + 177, + 545 + ], + "spans": [ + { + "bbox": [ + 130, + 537, + 177, + 545 + ], + "type": "text", + "content": "Perception-R1:" + } + ] + } + ], + "index": 15 + }, + { + "type": "code", + "bbox": [ + 157, + 547, + 304, + 630 + ], + "blocks": [ + { + "bbox": [ + 157, + 547, + 304, + 630 + ], + "lines": [ + { + "bbox": [ + 157, + 547, + 304, + 630 + ], + "spans": [ + { + "bbox": [ + 157, + 547, + 304, + 630 + ], + "type": "text", + "content": "[ {\"bbox_2d\": [435, 1, 616, 415], \"label\": \"person\"}, {\"bbox_2d\": [218, 112, 323, 253], \"label\": \"person\"}, {\"bbox_2d\": [147, 165, 225, 280], \"label\": \"person\"}, {\"bbox_2d\": [183, 126, 239, 242], \"label\": \"person\"}, {\"bbox_2d\": [276, 77, 338, 192], \"label\": \"person\"}, {\"bbox_2d\": [127, 54, 471, 406], \"label\": \"laptop\"}, {\"bbox_2d\": [108, 174, 156, 243], \"label\": \"cell phone\"}, {\"bbox_2d\": [26, 107, 103, 214], \"label\": \"potted plant\"}, {\"bbox_2d\": [64, 29, 280, 137], \"label\": \"chair\"} ]" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "code_body" + } + ], + "index": 16, + "sub_type": "code", + "guess_lang": "json" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_content_list.json 
b/data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2f54245f8807e13b71f93c69cc36c3eee42ed891 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_content_list.json @@ -0,0 +1,4519 @@ +[ + { + "type": "text", + "text": "MM-IFEngine: Towards Multimodal Instruction Following", + "text_level": 1, + "bbox": [ + 199, + 130, + 799, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shengyuan Ding $^{1,2*}$ , Shenxi Wu $^{1,2*}$ , Xiangyu Zhao $^{2,3}$ , Yuhang Zang $^{2\\boxtimes}$ , Haodong Duan $^{2}$ , Xiaoyi Dong $^{2}$ , Pan Zhang $^{2}$ , Yuhang Cao $^{2}$ , Dahua Lin $^{2,4,5}$ , Jiaqi Wang $^{2,6\\boxtimes}$ $^{1}$ Fudan University $^{2}$ Shanghai AI Laboratory $^{3}$ Shanghai Jiaotong University $^{4}$ The Chinese University of Hong Kong $^{5}$ CPII under InnoHK $^{6}$ Shanghai Innovation Institute", + "bbox": [ + 119, + 178, + 875, + 252 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f8b7a3511b4b56a4319ce0f5af8de1c97cdad29b7f56da0d870eccc27e2792ee.jpg", + "image_caption": [ + "(a) Current MMIF Bench", + "1. Answer as if you are facing to the audience. \n2. Use No more than 60 words....", + "Figure 1. (a) Limitations of existing Multimodal Instruction Following (IF) benchmarks. (b) Overview of the MM-IFEval benchmark, which significantly surpasses existing benchmarks in terms of constraint diversity, quantity, and instruction complexity. Our benchmark consists of Compose-Level (C-Level) problems that impose constraints on model outputs (e.g., format requirements, keyword limits) and Perception-Level (P-Level) problems that require reasoning about specific visual elements in images. (c) Our MM-IFEngine generates a large-scale, diverse training dataset suitable for both Supervised Fine-Tuning (SFT) and Direct Preference Optimization (DPO)." 
+ ], + "image_footnote": [], + "bbox": [ + 98, + 287, + 269, + 481 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ee039d219f172204d34b079a06e82c4fb8fe851e2819d7c69e5891a8db8ae899.jpg", + "image_caption": [ + "Various & Abundant" + ], + "image_footnote": [], + "bbox": [ + 277, + 273, + 349, + 308 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a378f0667b9cc7382a9e9850a7ce0d7cf5d50edbb696014f7c83556a3823502a.jpg", + "image_caption": [ + "Constraints", + "MTA-Bench (About 1k constraints)", + "(300 questions)" + ], + "image_footnote": [], + "bbox": [ + 352, + 273, + 496, + 344 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a3c586654cc1fd75bb35dbd94a5d9a308b95eadf05ded748ca85608e32953d6f.jpg", + "image_caption": [ + "(b) MM-IFEval Benchmark", + "follow instruction" + ], + "image_footnote": [], + "bbox": [ + 441, + 375, + 478, + 397 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To Say \nyou are the musician \nimage. 
Write about your \ns and feelings while \ning.", + "bbox": [ + 383, + 406, + 482, + 446 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1146acfca5d9086061b4a464479e45f75eb2dbde1aae95a753a84a6280d54ded.jpg", + "image_caption": [ + "Constraints" + ], + "image_footnote": [], + "bbox": [ + 279, + 393, + 339, + 441 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9482868896c9f2e21df0a05a6e6a138d23d6f7c7a0877327bcb605e2309fee18.jpg", + "image_caption": [ + "32 Categories of Constraints", + "5.1 Average constraints" + ], + "image_footnote": [], + "bbox": [ + 509, + 272, + 535, + 290 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/01829f3201f86c5241247f1c7856596385d02be6106a723acf901c1d6a6e9bde.jpg", + "image_caption": [ + "3 Evaluation metrics combined" + ], + "image_footnote": [], + "bbox": [ + 509, + 315, + 535, + 334 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9bc358e345c9b98ea3fb38fc86ce45afa38fa2fcdfcb276c74e120a498986945.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 340, + 535, + 358 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b8a348f6987610ae0c95cad2a4e302007efae81f1c69689c59abf0b69f401b43.jpg", + "image_caption": [ + "400 high-quality samples", + "100 Perception-Level" + ], + "image_footnote": [], + "bbox": [ + 593, + 359, + 627, + 375 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To See letters can you identify covering the right half e poster? 
Output in from top to bottom and o right separated with '", + "bbox": [ + 527, + 402, + 630, + 454 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "R,e,a,d,i,n,a,f,u", + "bbox": [ + 504, + 462, + 612, + 474 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5d2246d97d8d1fe22d28c2487f238cb66c3ff7dafcb28d5e72d52ca7359ca660.jpg", + "image_caption": [ + "Instruction", + "Ground Truth" + ], + "image_footnote": [], + "bbox": [ + 647, + 393, + 700, + 441 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c591eb706a2d3bf120b3452026a44ca6fc90941ddf2eea9b2d4a992aafbd80ed.jpg", + "image_caption": [ + "(c) MM-IF Dataset SFT & DPO", + "23kData generated with our MMTE Engine" + ], + "image_footnote": [], + "bbox": [ + 723, + 306, + 833, + 358 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/75cfe8b804245d7779058c9694eb4ff6df96eb7847bb5c12936a31f85a800642.jpg", + "image_caption": [ + "Compose a brief poem inspired by the cozy and serene." + ], + "image_footnote": [], + "bbox": [ + 849, + 335, + 879, + 359 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/811b4f1336891501d9f68e8fa390c7b31be3c17edd73089b93c5ac3750611e0a.jpg", + "image_caption": [ + "Each stanza should have 4 lines. Your output should include a metaphor", + "In a room where light gently plays, \nA haven carved from nature's ways. \nThe river whispers calm and clear, \nSerene as thoughts that banish fear..." + ], + "image_footnote": [], + "bbox": [ + 710, + 407, + 733, + 433 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a893f351b5971c3de588c7867fd7f15eec8696742e9e1f11d2c17a7a3e056f51.jpg", + "image_caption": [ + "In a world of hustle and bustle \nA haven of peace and solitude.", + "Soft curtains dance in the breeze. As the sun's rays gently caress." 
+ ], + "image_footnote": [], + "bbox": [ + 712, + 448, + 733, + 469 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/782f241997aa01eaabff7691aa2404853a7d7988ff1031db0803c4d8801ec1d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 877, + 412, + 897, + 429 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/253ed87f97d5bd866869740de70181822344ed1736d653dd6b6e2f7568de7895.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 877, + 450, + 897, + 468 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 250, + 593, + 325, + 607 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The Instruction Following (IF) ability measures how well Multi-modal Large Language Models (MLLMs) understand exactly what users are telling them and whether they are doing it right. Existing multimodal instruction following training data is scarce, the benchmarks are simple with atomic instructions, and the evaluation strategies are imprecise for tasks demanding exact output constraints. To address this, we present MM-IFEngine, an effective pipeline to generate high-quality image-instruction pairs. Our MM-IFEngine pipeline yields large-scale, diverse, and high-quality training data MM-IFInstruct-23k, which is suitable for Supervised Fine-Tuning (SFT) and extended as MM-IFDPO-23k for Direct Preference Optimization (DPO). We further introduce MM-IFEval, a challenging and diverse multi-modal instruction-following benchmark that includes (1) both compose-level constraints for output re", + "bbox": [ + 89, + 627, + 485, + 869 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sponses and perception-level constraints tied to the input images, and (2) a comprehensive evaluation pipeline incorporating both rule-based assessment and judge model. 
We conduct SFT and DPO experiments and demonstrate that fine-tuning MLLMs on MM-IFInstruct-23k and MM-IFDPO-23k achieves notable gains on various IF benchmarks, such as MM-IFEval $(+10.2\\%)$ , MIA $(+7.6\\%)$ , and IFEval $(+12.3\\%)$ . We have fully open-sourced the datasets (both SFT and DPO), evaluation code and training scripts at https://github.com/SYuan03/MM-IFEngine.", + "bbox": [ + 511, + 580, + 908, + 731 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 513, + 767, + 643, + 781 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Instruction Following (IF) is a fundamental ability in Large Language Models (LLMs) [14, 27, 35, 53, 57] and Multimodal Large Language Models (MLLMs) [2, 34], which involves accurately interpreting and executing user-provided instructions. This ability is crucial for deploying models in real-world applications where users expect precise and context-aware responses, such as code", + "bbox": [ + 511, + 794, + 906, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contribution.☑ Corresponding authors.", + "bbox": [ + 112, + 886, + 370, + 900 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07957v2 [cs.CV] 27 Apr 2025", + "bbox": [ + 22, + 263, + 58, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "generation [44], visual question answering [17], robots [38], and creative content creation [58]. 
For instance, in a VQA scenario, when a user asks an MLLM what is the object and how do I use it, return the object name and the usage instructions in a JSON format, accurate IF ensures the model provides a response like {object': 'hammer', 'usage': 'use it to drive nails'} instead of the plain text.", + "bbox": [ + 89, + 90, + 482, + 212 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Achieving precise IF in multimodal, diverse, and open-ended environments presents significant challenges for both model training and benchmark evaluation. One significant limitation is the scarcity of high-quality IF training data to train open-source MLLMs. In addition, current multimodal IF benchmarks [2, 34] merely have simple, atomic instructions, and the constraints are weakly correlated with visual content (see Fig. 1 (a)). Consequently, existing benchmarks lack the diversity required for real-world applications, leading to saturated results where nearly all models achieve over $80\\%$ . Furthermore, the evaluation method in existing benchmarks often relies on LLM-as-a-judge [56], which is imprecise for instructions demanding exact output constraints, such as word counts. Therefore, the combination of limited training data, simple benchmarks, and imprecise evaluation strategy strongly restricts the progress of current MLLMs in IF.", + "bbox": [ + 91, + 214, + 486, + 470 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the lack of high-quality IF training data and challenging benchmarks, we propose MM-IFEngine, an effective pipeline for generating high-quality image-instruction pairs. MM-IFEngine collects diverse image sources, including natural scenes, UI interfaces, diagrams, charts, and mathematical problems. We then employ a structured approach using a predefined set of 16 task descriptions and 32 constraints to guide the LLM in crafting tailored instructions for each image. 
Using MM-IFEngine, we generated a comprehensive dataset of image-instruction pairs, collected responses from open-source MLLMs, and applied rigorous post-processing to retain only high-quality instruction-answer pairs, thus constructing MM-IFInstruct-23k for Supervised Fine-Tuning (SFT). We also generate negative responses by selectively removing constraints from the original data, constructing the preference dataset MM-IFDPO-23k for preference optimization algorithms such as Direct Preference Optimization (DPO) [36].", + "bbox": [ + 89, + 474, + 486, + 746 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To facilitate the evaluation of multimodal IF, we present MM-IFEval, a benchmark comprising 400 challenging problems with diverse compose-level and perception-level instructions. MM-IFEval is derived from the images and instructions generated by MM-IFEngine with human-labeled annotations. As presented in Fig. 1 (b), our MM-IFEval has the following three distinctive features: (1) Diverse Instruction Types: MM-IFEval has 32 distinct constraints, ensuring a wide range of instruction complexities and surpassing the scope of prior benchmarks. (2) Hybrid Evaluation: we use", + "bbox": [ + 89, + 750, + 486, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "a hybrid strategy including both rule-based verification and judge model. For subjective instructions (e.g., mimicking tone), we design a comparative judgment for precise evaluation. Specifically, a control output is generated without the constraint, and the LLM judge compares both outputs for precise evaluation. 
(3) Challenging: the leading proprietary model (GPT-4o at $64.6\\%$ ) and open-source model (Qwen2-VL-72B at $50.8\\%$ ) demonstrating substantial room for improvement on our benchmark, highlights a significant opportunity for improvement in multimodal instruction following.", + "bbox": [ + 511, + 90, + 908, + 256 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We further demonstrate that fine-tuning MLLMs on either MM-IFInstruct-23k or MM-IFDPO-23k consistently boosts the performance of MLLMs on instruction following benchmarks, without compromising their original capabilities on other Visual Question Answering (VQA) benchmarks. Specifically, fine-tuning Qwen2-VL-7B on MM-IFDPO-23k with the DPO results in performance gains of $10.2\\%$ , $7.6\\%$ , and $12.3\\%$ on MM-IFInstruct-23k, MIA-Bench [34], and IFEval [57], respectively.", + "bbox": [ + 511, + 257, + 908, + 393 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions include: (1) a MM-IFEngine pipeline for generating multimodal constraint-rich image-instruction pairs; (2) a large-scale training dataset MM-IFInstruct-23k and preference optimization dataset MM-IFDPO-23k derived from MM-IFEngine; (3) a challenging multimodal instruction following benchmark MM-IFEval with diverse constraints and comprehensive evaluation approaches; and (4) empirical evidence showing significant performance gains on both our MM-IFEval and existing benchmarks when training MLLMs on MM-IFInstruct-23k via SFT and MM-IFDPO-23k via DPO.", + "bbox": [ + 511, + 393, + 910, + 559 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 513, + 571, + 656, + 589 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Instruction Following in LLMs. Various benchmarks and training approaches have been proposed to make Large Language Models (LLMs) better align with human instructions. 
While existing Instruction Following (IF) benchmarks like [14, 35, 53, 57] all aim to evaluate instruction following, they differ significantly in their dataset construction pipelines, driven by their unique constraint taxonomies. CFBench [53], for instance, constructs its dataset using a combination of taxonomic and statistical methodologies to establish comprehensive constraints. This divergence extends to their evaluation strategies. For example, InFoBench [35] adopts a strategy of decomposing complex instructions into simpler assessment standards. Beyond benchmarks, various training approaches aim to enhance LLMs' instruction-following capabilities [29, 44], including in-context learning [58] and preference optimization [54]. However, he aforementioned research is limited to the text modality, whereas our work focuses on multi-modal instruction following with vision inputs.", + "bbox": [ + 511, + 598, + 908, + 883 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Instruction Following Benchmarks in MLLMs. Numerical", + "bbox": [ + 511, + 885, + 908, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/779b73debc619571e7701ceb41cc0821a65f6b6bae44f37cd947132bc6dea8dd.jpg", + "image_caption": [ + "Figure 2. Overall pipeline of MM-IFEngine. Part (a) demonstrates the three-stage workflow of our engine: (1) Image filter; (2) Task generation using GPT-4o for images without QA pairs and instruct refinement for existing annotations; and (3) Constraints integration incorporating 6 main categories and 32 subcategories, ensuring compatibility between constraints and tasks. MM-IFEngine is employed to generate SFT and DPO training datasets and MM-IFEval benchmark, as shown in part (b) and (c). MM-IFEval implements three evaluation metrics combining rule-based verification functions and a judge model to ensure accurate assessment." 
+ ], + "image_footnote": [], + "bbox": [ + 102, + 90, + 330, + 435 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/22a8743138bb9705cbfaa1f460aa340a3dd2f922340969a4a75e4547935fad07.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 90, + 602, + 436 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e82b943d134af69e1fe089952dc36d51fc81bd96571d3c00377c3b8f701a9907.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 607, + 90, + 903, + 436 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ous benchmarks [18] have been proposed to evaluate diverse capabilities of Multi-modal Large Language Models (MLLMs), including general knowledge [5, 24, 48, 50], document understanding [15, 25, 30], perception [43, 52], multi-image comprehension [26, 39, 40], and instruction following (IF) [2, 34]. MIA-Bench [34] and VisIT-Bench [2] are representative IF benchmarks that employ GPT-4 [32] for question generation and evaluation. In contrast to existing IF benchmarks, our MM-IFEval introduces significant improvements in diversity (32 constraint categories covering compositional and perceptual aspects), difficulty (averaging 5.1 constraints per question), and evaluation precision (using both judge models and rule-based verification).", + "bbox": [ + 88, + 549, + 485, + 744 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Instruction Tuning Data for MLLMs. Recent advancements in multi-modal instruction tuning data aim to improve cross-modal alignment and increase the variety of tasks handled by MLLMs [4, 8, 20, 26, 45, 46, 51]. For example, some previous works [3, 4, 23] build synthetic instruction tuning data generated using GPT-4V [33], enabling open-source MLLMs to achieve performance comparable to proprietary models across multiple benchmarks. 
However, existing instruction tuning data are mainly designed for general knowledge or visual perception, and data for", + "bbox": [ + 89, + 750, + 485, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "improving the IF abilities is scarce. The scarcity of training data for enhancing IF abilities motivated the development of our MM-IFEngine pipeline.", + "bbox": [ + 511, + 549, + 906, + 595 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. MM-IFEngine", + "text_level": 1, + "bbox": [ + 511, + 611, + 661, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We employ the MM-IFEngine pipeline to generate image-instruction pairs, which are the foundation for creating instruction tuning data and our benchmark. As shown in Fig. 2 (a), the pipeline is composed of three main stages: (1) image filtering, where we systematically select a diverse set of images from multiple sources to ensure broad coverage of visual content; (2) task generation, in which we either synthesize novel tasks tailored to the selected images or refine existing instruction templates to better align with the image content; and (3) constraint integration, where high-quality, constraint-aware instructions are generated for images that initially lack associated annotated guidance, thereby enhancing the richness and precision of the dataset.", + "bbox": [ + 511, + 638, + 908, + 835 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Image Filter", + "text_level": 1, + "bbox": [ + 511, + 847, + 645, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our image filtering strategy selects only high-quality images by removing those with low resolution or limited semantic", + "bbox": [ + 511, + 869, + 906, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "richness. 
For unannotated pure image datasets (e.g., CC3M [37]), we prioritize natural scene images. Rich semantic content in these images enables the creation of more comprehensive and insightful QA pairs, which is crucial for designing diverse and complex instruction following tasks. We use the IC9600 and RAM metric proposed in the previous method [55] to select the images that have rich semantic content.", + "bbox": [ + 89, + 90, + 480, + 210 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Furthermore, we analyze existing annotated datasets, such as ALLaVA [3]. Our analysis reveals that some images suffer from low resolution, making them inadequate for the instruction-following task. Given our intention to design more intricate and varied instruction following tasks based on this data, we filter out data items containing low-quality images.", + "bbox": [ + 89, + 210, + 480, + 316 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Task Generation", + "text_level": 1, + "bbox": [ + 89, + 327, + 254, + 340 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image Source without Original QA Pairs. For image datasets lacking original annotated task instructions (e.g., CC3M [37]), we first design appropriate task instructions for the data items. We first develop a series of task instructions tailored to the data items. These instructions are crafted to elicit long-form responses that can be subsequently modified or refined using various constraints, for instance, Provide a detailed analysis of the image, including the setting, characters, and notable objects. The final task pool $\\mathcal{P}_T$ comprises a total of 16 distinct tasks, with further details available in Appendix A.1.2.", + "bbox": [ + 89, + 348, + 482, + 513 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given the task pool $\\mathcal{P}_T$ , we randomly select $k$ tasks as examples of task types for each image $I$ . 
We then prompt a powerful language model $\\mathcal{M}$ (e.g., GPT-4o) to generate an appropriate task list $T_l$ that aligns with the image content. The process is formulated as:", + "bbox": [ + 89, + 515, + 483, + 590 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{T _ {l} ^ {*} \\right\\} = \\mathcal {M} \\left(I, T _ {e}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 222, + 604, + 482, + 622 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $T_{e} = \\{T_{1}, T_{2}, \\ldots, T_{k}\\}$ and each $T_{i} \\in \\mathcal{P}_{T}$ . The model $\\mathcal{M}$ is tasked with either choosing relevant tasks from $T_{e}$ or supplementing reasonable tasks to construct the appropriate task list $T_{l}^{*}$ , ensuring that all tasks in $T_{l}^{*}$ are in line with the image content. After generating the $T_{l}^{*}$ , a sampling step is incorporated to guarantee task diversity. For each image, tasks are sampled. This sampling process is crucial as it enriches the variety of tasks associated with each image.", + "bbox": [ + 89, + 628, + 482, + 748 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image Source with QA Pairs. In the case of image datasets that have QA pairs (e.g., ALLaVA [3]), we adopt certain strategies for processing the original question annotations. We choose ALLaVA as the primary dataset for this type of image source due to its rich and diverse image content, which is accompanied by a variety of task types. First, we conduct an analysis of the original question annotations. We find that some of the questions are accompanied by some few-shot examples. Additionally, some questions in ALLaVA have options in their original annotations, which are not", + "bbox": [ + 89, + 750, + 483, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "suitable for our instruction-following task. 
Since we need to incorporate certain constraints into the original instructions in the subsequent steps, we use regular expressions and length limits to filter the questions in ALLaVA. Specifically, we select those questions that do not have few-shot examples associated with them. Mathematically, if we let $Q$ be the set of all questions in ALLaVA, $Q_{fs}$ be the subset of questions with few-shot examples, and $Q_{op}$ be the subset of questions with options. We aim to find the subset $Q_{s}$ of questions that satisfy the conditions:", + "bbox": [ + 511, + 90, + 903, + 242 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nQ _ {s} = \\left\\{q \\in Q | q \\notin Q _ {f s} \\wedge q \\notin Q _ {o p} \\right\\} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 253, + 906, + 270 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the filtering based on the absence of few-shot examples and options is achieved using regular expressions and length limits. Then, we get the expected $T^{*}$ in our filter $Q_{s}$ set for the images.", + "bbox": [ + 511, + 280, + 906, + 340 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Constraints Integration", + "text_level": 1, + "bbox": [ + 511, + 349, + 730, + 366 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Constraints Pool $(\\mathcal{P}_C)$ We use instruction to refer to the entire textual input, which in our paper can generally be viewed as a composition of a task instruction and multiple constraints instruction. Tasks and constraints are rich and diverse, with a certain complexity in our work. All the constraints in our work can be further classified into six major categories, each with its own unique characteristics and applications: Text Length Requirements, Mathematical Requirements, Language & Formatting Requirements, Rhetoric & Logic Requirements, Action Requirements, and Keyword Requirements. Please refer to the Appendix Fig. 
5 for more details of all the constraints.", + "bbox": [ + 511, + 372, + 906, + 551 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given the constraints pool $\\mathcal{P}_C$ and task instructions, a straightforward approach for composing full instruction is to first set several constraints for each constraint type and then randomly select one constraint from some of the types to compose the constraint list, and finally concatenate the constraint list with the task instruction to form the full instruction. But this direct method has two problems: (1) The constraints are not diverse enough, which may not be able to fully evaluate the ability of the model. (2) The contradiction between the constraints and also between the constraints and the task instruction may exist. For the first problem, an LLM is employed to generate concrete content of constraint instruction for the specific constraint type in our method. In order to avoid the generated content being too divergent or hard to control its difficulty, we carefully design some cases or requirements of details that needed to be paid attention to when generating the content for each constraint type (Appendix A.1.1). For the second problem, we also use a powerful LLM to help keep the correlation of constraints with its instruction and filter out those that cause total contradiction. Finally, we prompt an LLM to check whether the constraints and the task instruction are compatible and filter out those failing to pass the check. 
Our method not only", + "bbox": [ + 511, + 554, + 908, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ensures the compatibility of constraints and instructions but also enriches the diversity of constraints.", + "bbox": [ + 89, + 90, + 482, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In our actual practice process, we find that although we prompt the LLM to select appropriate constraints that should be compatible with the task instruction and other constraints, the generated constraints still have some contradiction with the task instruction, especially on those existing datasets with various kinds of annotations. The reason is that these datasets are designed for overall question-answering tasks, and the question(or named task instruction) tends to be contradictory with the constraints, which are mostly compatible with those tasks of creating or answering in non-short form. So, we decouple the selection and generation steps for this type of data source. Specifically, we first select the constraints from the constraints pool $\\mathcal{P}_C$ and then provide the selected mostly compatible constraints to the LLM to select secondly and generate final constraints. But for image datasets without original QA pairs, in other words, for which we generate task instructions for them using $\\mathcal{P}_T$ , we directly sample k constraint types for the LLM to generate concrete content because they are mostly compatible with the pre-designed task instruction. 
The uniform process is formulated as:", + "bbox": [ + 91, + 122, + 483, + 422 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC _ {l} ^ {*} = \\mathcal {L} \\left(C _ {s}, T ^ {*}\\right), C _ {f} ^ {*} = \\mathcal {V} \\left(C _ {l} ^ {*}, T ^ {*}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 434, + 482, + 453 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{T}^*$ is the task applicable to the image. The model $\\mathcal{L}$ is tasked with both choosing appropriate constraint types from $C_s$ again and generating concrete constraints for some of them, whose output is a list of concrete constraint descriptions. To ensure that the generated constraints remain compatible with the given task instruction $T^*$ , we employ a final validation step using another LLM process, denoted as $\\mathcal{V}$ . This validation function checks whether each constraint in $C_l^*$ aligns with $T^*$ and filters out those that contradict or do not fit the task instruction. The resulting set of fully verified and compatible constraints is represented as $C_f^*$ .", + "bbox": [ + 89, + 462, + 482, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "MM-IFInstruct-23k Construction. By applying the MM-IFEngine pipeline, we construct the MM-IFInstruct-23k dataset, which contains 23k high-quality multi-modal instruction-following training data. We first take an analysis of the performance of the current open-source MLLMs and proprietary MLLMs on several benchmarks [25, 34], and find that for instruction-following capability, the most powerful open-source MLLM like InternVL2.5-78B-MPO [42] is nearly equivalent to GPT-4o, and the performance on general VQA benchmarks are even higher than GPT-4o. Thus, we use InternVL2.5-78B-MPO to generate responses for our MM-IFInstruct-23k dataset. 
Despite its capabilities, the InternVL2.5-78B-MPO model encounters difficulties in ensuring $100\\%$ compliance with our constraints, a challenge attributed to the complexity, number, and comprehensiveness. Consequently, we implement a post-processing stage to filter out responses that do not meet the specified criteria. Acknowledging that achieving perfect constraint adherence", + "bbox": [ + 89, + 628, + 482, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "might be challenging even for human annotators on this task, we set a practical accuracy threshold of $80\\%$ . Finally, our MM-IFInstruct-23k comprises 23k data items, with 16k constructed from the training set of CC3M, 6k from ALLaVA, and 4k from the training set of MultiUI, Geo170k[12] and ChartQA[31]. We show the distribution of constraints number of MM-IFInstruct-23k in Fig. 3.", + "bbox": [ + 511, + 90, + 906, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "MM-IFDPO-23k Construction. To comprehensively explore and make full use of our high-quality data, we also utilize MM-IFEngine to construct MM-IFDPO-23k, a preference dataset comprising chosen and rejected samples suitable for Direct Preference Optimization (DPO) [36]. Our high-quality data can be directly employed as the chosen samples. Regarding rejected samples, we opt to utilize Qwen2-VL-7B-Instruct to answer the variant of the question for generating rejected pairs. Specifically, we have four distinct settings for generating negative pairs, which mainly differ in the input to Qwen2-VL-7B-Instruct. 
These settings include (1) With image, but randomly remove one-third of the number of constraints in the prompt; (2) With image, but randomly remove two-thirds of the number of constraints in the prompt; (3) With image, but randomly remove all the constraints in the prompt; and (4) Full prompt, but without the image; We use these four types of input to feed into Qwen2-VL-7B-Instruct model, and collect the rejected responses to construct the MM-IFDPO-23k.", + "bbox": [ + 511, + 198, + 908, + 483 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. MM-IFEval", + "text_level": 1, + "bbox": [ + 513, + 496, + 638, + 512 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Existing benchmarks for multi-modal instruction following are scarce. The majority focus on simple and atomic instructions, resulting in performance saturation across models. To address this limitation, we introduce MM-IFEval, a human-annotated, comprehensive, and challenging benchmark designed for evaluating multi-modal IF.", + "bbox": [ + 511, + 522, + 906, + 613 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. MM-IFEval Construction", + "text_level": 1, + "bbox": [ + 511, + 622, + 746, + 636 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To construct the MM-IFEval, we first use our MM-IFEngine to generate the question-answer (QA) pairs for images. The generated instructions may inherently contain potential conflicts. Consequently, human annotation remains critical for constructing this benchmark, as human annotators possess the cognitive capacity for comprehensive assessment of these complex situations. After the human annotation, we further use an extra post-processing step that prompts the LLMs to double-check and mitigate the occurrence of constraint conflicts as much as possible. 
Finally, we construct the MM-IFEval bench of 400 questions, 300 of which are compose-level open-ended questions and 100 perception-level questions with ground truth.", + "bbox": [ + 511, + 643, + 908, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Diverse Constraints. With 32 distinct constraint categories and an average of 5.1 constraints per question, MM-IFEval presents a more challenging evaluation task compared to earlier benchmarks (e.g., [34], which has 8 categories and 2.6", + "bbox": [ + 511, + 839, + 908, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e90dbd5a52e6d099a2eb2d93609c0aa8ceee4aed0b36fcf9264cf178892f3f49.jpg", + "image_caption": [ + "Figure 3. Constraint Quantity Distribution in MM-IFInstruct-23k. Our MM-IFInstruct-23k exhibits systematic variation in constraint complexity, with each sample containing 3-12 constraints per instruction." + ], + "image_footnote": [], + "bbox": [ + 102, + 95, + 472, + 289 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "average constraints per question). Furthermore, our benchmark incorporates essential constraints such as \"Output in JSON format\", which is prevalent and practical in real-world scenarios, a feature not found in previous multi-modal instruction following benchmarks.", + "bbox": [ + 89, + 393, + 483, + 469 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Compose-level and Perception-level Questions. compose-level questions involve textual constraints, while perception-level questions require greater visual perception ability to solve. The perception-level questions incorporate a variety of image sources, such as natural scenes, user interfaces, diagrams, table charts, and mathematical expressions, which we believe are representative of real-world applications. 
Please refer to the Appendix for examples of compose-level and perception-level questions.", + "bbox": [ + 89, + 470, + 483, + 607 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Hybrid Evaluation", + "text_level": 1, + "bbox": [ + 89, + 619, + 272, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Current multi-modal instruction following benchmarks often rely solely on GPT-4o for evaluation. However, accurately assessing certain constraints, such as numerical conditions (e.g., 'output in 200 words', 'Answer in 5 paragraphs', 'Use the word 'cat' in the answer twice'), remains challenging even for GPT-4o. In contrast, verifiable functions like string matching offer greater precision than judge models for such constraints. To address this, we propose a hybrid evaluation strategy (see Fig. 2(c)) that employs three methods, including both rule-based Verification and judge models for more robust and precise evaluation.", + "bbox": [ + 89, + 642, + 485, + 823 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(1) Rule-based Verification. For constraints that adhere to a fixed format and involve specific content that can be objectively verified—yet remain challenging for an LLM to assess accurately—we employ a rule-based approach. Specifically, we design a set of predefined functions for different con", + "bbox": [ + 89, + 825, + 485, + 901 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/514e423050fb0272612f0f222c607457aa6658a3c1d4f4cee5e2a32743f32099.jpg", + "image_caption": [ + "Figure 4. Constraint Category Distribution inCompose-Level Problems of MM-IFEval. This part comprises six primary constraint categories with 32 subcategories, forming a multi-level taxonomy for instruction-following evaluation." + ], + "image_footnote": [], + "bbox": [ + 566, + 90, + 836, + 297 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "strand types. 
The LLM is first prompted to extract the relevant parameters, denoted as Params, from the constraint description. When evaluating a constraint that falls within the scope of our rule-based framework, we use Params and the model's output as inputs to the predefined function to determine compliance.", + "bbox": [ + 511, + 393, + 906, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(2) LLM-based Direct Judgment. This method is primarily used for evaluating constraints that can be easily and unambiguously verified based on the model's output. It is applicable to constraints where correctness is straightforward to determine, such as those requiring the inclusion of specific words or phrases. For instance, a constraint like \"Use the word 'inspiration' or its synonyms at least twice in the response\" does not follow a strict format and cannot be assessed using a rule-based approach. Instead, we directly leverage an LLM to determine whether the constraint is satisfied.", + "bbox": [ + 511, + 484, + 908, + 648 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(3) LLM-based Comparative Judgment. Some constraints, particularly those related to tone, style, or role-playing, are difficult to evaluate directly. To improve judgment accuracy, we adopt a comparative approach. Specifically, we generate a second model output using a nearly identical prompt but without the constraint under evaluation. The LLM-based evaluator is then provided with both outputs and asked to compare them, determining whether the model's response with the constraint in the prompt adheres more closely to the expected requirement.", + "bbox": [ + 511, + 651, + 908, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 511, + 815, + 645, + 830 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Benchmarks. 
We select the following benchmarks to demonstrate that models fine-tuned on MM-IFInstruct-23k and MM-IFDPO-23k enhance instruction following without compromising performance on other VQA tasks: (1)", + "bbox": [ + 511, + 839, + 906, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/7d4bda3009acb1f7beb7ea117256abba0234caba3ad6dd0ed42754e5c74ba40c.jpg", + "table_caption": [ + "Table 1. Main results on Instruction Following benchmarks, including our proposed MM-IFEval, MIA-Bench [34], and IFEval [57]. The symbol ${}^{\\mathrm{M}}$ refers to multimodal benchmarks,and ${}^{\\mathrm{T}}$ denotes text-only benchmarks. We report both compose-level (\"C\") and perception-level (\"P\") for MM-IFEval,prompt-level accuracy (\"Prompt.\")andInst-level accuracy (\"Inst.\")for IFEval,and the averaged results across all three benchmarks in the rightmost column." + ], + "table_footnote": [], + "table_body": "
ModelParameterMM-IFEvalM(ours)MIA MIFTAvg.
CPAvg.Prompt.Inst.Avg.
LLaVA-NeXT-7B [21]7B36.816.031.673.232.043.337.747.5
LLaVA-OneVision-Qwen2-7B-OV [16]8B37.424.034.084.543.354.849.055.8
InternVL2-8B [7]8B45.232.041.986.244.657.050.859.6
InternVL2.5-8B [6]8B49.636.046.288.552.262.457.364.0
LLaVA-NeXT-Llama3-8B [21]8B45.921.039.783.345.056.450.757.9
w. MM-IFInstruct-23k-59.319.049.2 +9.586.5 +3.250.861.856.3 +5.664.0 +6.1
w. MM-IFDPO-23k-58.721.049.3 +9.690.0 +6.764.573.769.1 +18.469.5 +11.6
Qwen2-VL-7B-Instruct [41]8B42.740.042.080.542.452.547.456.6
w. MM-IFInstruct-23k-57.038.052.3 +10.387.7 +7.246.858.452.6 +5.264.2 +7.6
w. MM-IFDPO-23k-55.243.052.2 +10.288.1 +7.655.264.359.7 +12.366.7 +10.1
", + "bbox": [ + 93, + 148, + 929, + 319 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6b5b9b93d1fcd79a581602fab86639d6577f9b424223acb45e02e3004e7e0e55.jpg", + "table_caption": [ + "Table 2. Main results on VQA benchmarks, including general knowledge (MMMU [50], MMBench [24], MMStar [5], MMT-Bench [48]), document understanding (AI2D [15], OCRBench [25]), Chat (MMVet [49]) and Hallusion (POPE [19]). Fine-tuning models on MM-IFDPO-23k achieve comparable performance across these benchmarks." + ], + "table_footnote": [], + "table_body": "
ModelGeneralDocumentChatHallusion
MMMUvalMMBenchdevMMStarMMT-BenchvalAI2DOCRBenchMM VetPOPEAvg.
LLaVA-NeXT-Llama3-8B [21]43.772.543.653.173.155.043.387.258.9
w. MM-IFInstruct-23k45.869.344.253.371.255.346.388.859.3
w. MM-IFDPO-23k44.172.143.753.172.356.743.986.859.1
Qwen2-VL-7B-Instruct [41]53.981.060.863.282.986.763.386.372.3
w. MM-IFInstruct-23k54.079.357.161.081.681.861.689.270.7
w. MM-IFDPO-23k54.081.358.563.783.386.866.185.772.4
", + "bbox": [ + 93, + 378, + 919, + 494 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Instruction Following benchmarks, including MIA-Bench [34], IFEval [57], and our proposed MM-IFEval. To be noted, IFEval is a language-only benchmark while others are both multi-modal benchmarks. (2) VQA Benchmarks, including MMMU [50], MMBench [24], MMStar [5], AI2D [15], OCRBench [25], MMVet [49], POPE [19] and MMT-Bench [48].", + "bbox": [ + 88, + 520, + 480, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Implementation Details. We conducted SFT and DPO fine-tuning experiments on two representative MLLMs: Qwen2-VL-7B-Instruct [41] and LLaVA-Next-Llama3-8B [21], using our custom datasets MM-IFInstruct-23k for supervised fine-tuning (SFT) and MM-IFDPO-23k for direct preference optimization (DPO). For the SFT phase, we used a batch size of 128 and a learning rate of 1e-5. For the DPO phase, we used a learning rate of 5e-7 with the batch size of 16. We implemented our training pipeline with the help of LLaMAFactory and evaluation pipeline under VLMEvalkit [10].", + "bbox": [ + 88, + 626, + 482, + 777 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1. Results about MM-IFInstruct-23k and MM-IFDPO-23k", + "text_level": 1, + "bbox": [ + 89, + 787, + 482, + 816 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Consistently Improvements on Instruction Following Benchmarks. As shown in Tab. 1, both MM-IFInstruct-23k and MM-IFDPO-23k significantly enhance the model's performance in instruction following benchmarks. Finetuning LLaVA-Next and Qwen2-VL on MM-IFInstruct-23k", + "bbox": [ + 89, + 825, + 483, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "yielded significant averaging performance gains of $6.1\\%$ and $7.6\\%$ points, respectively. Furthermore, applying DPO with MM-IFDPO-23k also led to notable improvements for LLaVA-Next and Qwen2-VL, with average gains of $11.6\\%$ and $10.1\\%$ points. 
Such improvements demonstrate the effectiveness of MM-IFEngine in constructing high-quality training data.", + "bbox": [ + 511, + 520, + 906, + 626 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparable Results on VQA Benchmarks. To show that fine-tuning on MM-IFInstruct-23k and MM-IFDPO-23k improves instruction following without degrading performance on other VQA tasks, we analyzed model performance on other widely used benchmarks, as detailed in Tab. 2. Results indicate that models fine-tuning with MM-IFInstruct-23k and MM-IFDPO-23k demonstrate comparable performance across these benchmarks.", + "bbox": [ + 511, + 642, + 908, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "SFT vs DPO. As evidenced by Tab. 1 and Tab. 2, DPO using MM-IFDPO-23k significantly surpasses SFT on MM-IFInstruct-23k. This is likely due to negative samples of DPO, which are essential for training models to respect constraints, particularly in our data with multiple and diverse constraints. Additionally, the Kullback-Leibler (KL) divergence in DPO preserves the model's generalization, as demonstrated in Tab. 2.", + "bbox": [ + 511, + 779, + 908, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/7d566bf817e91560624919137435e2e54084d3f343d7e69395cddc8f69c2ba35.jpg", + "table_caption": [ + "Table 3. Evaluation of various MLLMs on MM-IFEval. We report the accuracy of easy and difficult problems and the average accuracy across all problems. The C-Level and P-Level refer to the compose-level and perception-level problems, respectively. The best performance in each section is highlighted in bold." + ], + "table_footnote": [], + "table_body": "
ModelParamC-LevelP-LevelAvg.
Proprietary MLLMs
Claude-3.5V-Sonnet [1]-67.544.061.7
GPT-4o-mini [13]-70.440.062.8
GPT-4o (20240806) [13]-71.544.064.6
Open-Source MLLMs
LLaVA-NeXT-7B [21]7B36.816.031.6
LLaVA-OneVision-Qwen2-7b-OV [16]8B37.424.034.0
MiniCPM-V-2.6 [47]8B39.232.037.4
InternVL2-8B [7]8B45.232.041.9
InternVL2-40B [7]40B48.036.045.0
InternVL2.5-8B [6]8B49.636.046.2
InternVL2.5-26B [6]8B53.532.048.1
Qwen2-VL-72B-Instruct [41]72B53.443.050.8
LLaVA-NeXT-Llama3-8B [21]8B45.921.039.7
+ MM-IFDPO-23k-58.721.049.3
Qwen2-VL-7B-Instruct [41]8B42.740.042.0
+ MM-IFDPO-23k-55.243.052.2
", + "bbox": [ + 91, + 162, + 483, + 398 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2. Leaderboard of MM-IFEval", + "text_level": 1, + "bbox": [ + 89, + 424, + 344, + 439 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present the performance comparison results of various MLLMs on our MM-IFEval in Tab. 3, including both proprietary MLLMs such as GPT-4o [13] and Claude-3.5 [1] and open-source MLLMs such as LLaVA-Next [21], LLaVA-OneVision [16], InternVL [6, 7], and Qwen2-VL [41].", + "bbox": [ + 89, + 446, + 482, + 521 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "MM-IFEval is Challenging. Results on Tab. 3 demonstrate that multimodal instruction following is still a challenging and unsolved task for current MLLMs, specifically for the perception-level problems. The propriety models GPT-4o and Claude-3.5V-Sonnet establish top-tier average performance with scores of 64.6 and 61.7, respectively. The leading open-source MLLM, Qwen2-VL-72B merely achieves an overall accuracy of 50.8. We attribute the performance gap between proprietary and open-source models to the scarcity of high-quality open-source training data for instruction following. As a result of our MM-IFDPO-23k, Qwen2-VL-7B fine-tuned via our optimized DPO approach achieves a score of 52.2, demonstrating a $24.3\\%$ relative improvement over its baseline (42.0), and even surpasses the larger Qwen2VL-72B model. We hope our MM-IFEval benchmark motivates further exploration into improving MLLM instruction-following.", + "bbox": [ + 89, + 521, + 482, + 777 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Benchmark Examples. Please refer to the Appendix for visual examples of MM-IFEval, including images and instructions with constraints for both compose-level and perception-level problems.", + "bbox": [ + 89, + 777, + 483, + 839 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3. 
Ablation Studies", + "text_level": 1, + "bbox": [ + 89, + 848, + 254, + 862 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation Studies on Different DPO Settings. In Tab. 4, we present an ablation study on various strategies for con", + "bbox": [ + 89, + 869, + 483, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3d7a238954389d9ee0e5757dcc7bb6a09455c5244976e249843db6c5a9f8ab02.jpg", + "table_caption": [ + "Table 4. Ablation studies across different DPO settings, including randomly deleting constraints (second row to fourth row) or prompting MLLMs without images (bottom row) to generate negative responses. Avg. refers to the average score of three IF benchmarks." + ], + "table_footnote": [], + "table_body": "
ModelMM-IFEvalMIAIFEvalAvg.
Qwen2-VL-7B-Instruct42.080.547.456.6
+ DPO (-33% cons)51.588.257.965.8
+ DPO (-66% cons)51.288.058.465.9
+ DPO (-100% cons)52.288.159.766.7
+ DPO (w/o img)48.486.954.763.4
LLaVA-NeXT-Llama3-8B39.783.350.757.9
+ DPO (-33% cons)50.487.264.367.3
+ DPO (-66% cons)48.786.869.768.4
+ DPO (-100% cons)49.390.069.169.5
+ DPO (w/o img)44.785.964.865.2
", + "bbox": [ + 517, + 162, + 903, + 321 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "structuring pairwise preference data for Direct Preference Optimization (DPO). These strategies primarily include: (1) generating rejected responses by randomly removing constraints from the instruction (second to fourth rows), and (2) prompting MLLMs without providing image inputs to generate rejected responses (bottom row).", + "bbox": [ + 511, + 342, + 906, + 434 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct experiments on both the Qwen2-VL-7B-Instruct and LLaVA-NeXT-Llama3-8B models. As shown in Tab. 4, all DPO variants exhibit strong robustness, consistently outperforming the baseline. Among the four evaluated strategies, removing $100\\%$ of the constraints to generate rejected responses achieves the best performance, whereas omitting image inputs yields the weakest performance. Furthermore, we observe a consistent trend: as the proportion of removed constraints increases from $33\\%$ to $100\\%$ , the performance of the resulting DPO models improves accordingly. This suggests that removing more constraints amplifies the semantic gap between preferred and rejected responses, thereby enhancing the effectiveness of contrastive learning during DPO training.", + "bbox": [ + 511, + 434, + 908, + 645 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Based on these findings, we adopt the $100\\%$ -constraint removal strategy as the default approach for constructing the DPO data in MM-IFDPO-23k.", + "bbox": [ + 511, + 646, + 906, + 691 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 708, + 633, + 723 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This paper contributes to the field of multimodal instruction-following by exploring pipelines for training data collection and proposing a challenging benchmark. 
We present MM-IFEngine, a pipeline designed to generate image-instruction pairs, subsequently used to construct MM-IFInstruct-23k for SFT and MM-IFDPO-23k for DPO. We also analyze the limitations of existing multimodal instruction following benchmarks and propose MM-IFEval, a benchmark featuring diverse instruction types and a hybrid evaluation strategy that combines rule-based methods with an LLM-based judge. We hope this work inspires further research into improving the", + "bbox": [ + 511, + 734, + 908, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "instruction-following ability of Multimodal Large Language Models, a critical step towards realizing their potential in diverse and impactful applications.", + "bbox": [ + 89, + 90, + 485, + 137 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 151, + 187, + 167 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Anthropic. Claude 3.5 sonnet. 2024. 8", + "[2] Yonatan Bitton, Hritik Bansal, Jack Hessel, Rulin Shao, Wanrong Zhu, Anas Awadalla, Josh Gardner, Rohan Taori, and Ludwig Schmidt. VisIT-Bench: A benchmark for vision-language instruction following inspired by real-world use. In NeurIPS, Datasets and Benchmarks, 2023. 1, 2, 3", + "[3] Guiming Hardy Chen, Shunian Chen, Ruifei Zhang, Junying Chen, Xiangbo Wu, Zhiyi Zhang, Zhihong Chen, Jianquan Li, Xiang Wan, and Benyou Wang. Allava: Harnessing gpt4v-synthesized data for lite vision-language models. arXiv preprint arXiv:2402.11684, 2024. 3, 4, 2", + "[4] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions, 2023. 
3",
        "[5] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? In NeurIPS, 2024. 3, 7",
        "[6] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. 7, 8",
        "[7] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 7, 8",
        "[8] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. Instructblip: Towards general-purpose vision-language models with instruction tuning, 2023. 3",
        "[9] Biplab Deka, Zifeng Huang, Chad Franzen, Joshua Hibschman, Daniel Afergan, Yang Li, Jeffrey Nichols, and Ranjitha Kumar. Rico: A mobile app dataset for building data-driven design applications. In Proceedings of the 30th annual ACM symposium on user interface software and technology, pages 845-854, 2017. 2",
        "[10] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM international conference on multimedia, pages 11198-11201, 2024. 7",
        "[11] Xinyu Fang, Zhijian Chen, Kai Lan, Shengyuan Ding, Yingji Liang, Xiangyu Zhao, Farong Wen, Zicheng Zhang, Guofeng Zhang, Haodong Duan, et al. Creation-mmbench: Assessing context-aware creative intelligence in mllm. arXiv preprint arXiv:2503.14478, 2025. 
3" + ], + "bbox": [ + 93, + 176, + 483, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[12] Jiahui Gao, Renjie Pi, Jipeng Zhang, Jiacheng Ye, Wanjun Zhong, Yufei Wang, Lanqing Hong, Jianhua Han, Hang Xu, Zhenguo Li, et al. G-llava: Solving geometric problem with multi-modal large language model. arXiv preprint arXiv:2312.11370, 2023.5.2", + "[13] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. GPT-4o system card. arXiv preprint arXiv:2410.21276, 2024. 8", + "[14] Yuxin Jiang, Yufei Wang, Xingshan Zeng, Wanjun Zhong, Liangyou Li, Fei Mi, Lifeng Shang, Xin Jiang, Qun Liu, and Wei Wang. Followbench: A multi-level fine-grained constraints following benchmark for large language models. In ACL, 2024. 1, 2", + "[15] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In ECCV, 2016. 3, 7", + "[16] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Peiyuan Zhang, Yanwei Li, Ziwei Liu, et al. LLaVA-OneVision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 7, 8", + "[17] Huayang Li, Siheng Li, Deng Cai, Longyue Wang, Lemao Liu, Taro Watanabe, Yujiu Yang, and Shuming Shi. TextBind: Multi-turn interleaved multimodal instruction-following in the wild. In ACL Findings, 2024. 2", + "[18] Jian Li, Weiheng Lu, Hao Fei, Meng Luo, Ming Dai, Min Xia, Yizhang Jin, Zhenye Gan, Ding Qi, Chaoyou Fu, et al. A survey on benchmarks of multimodal large language models. arXiv preprint arXiv:2408.08632, 2024. 3", + "[19] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models, 2023. 7", + "[20] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv preprint arXiv:2304.08485, 2023. 
3",
        "[21] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, 2024. 7, 8",
        "[22] Junpeng Liu, Tianyue Ou, Yifan Song, Yuxiao Qu, Wai Lam, Chenyan Xiong, Wenhu Chen, Graham Neubig, and Xiang Yue. Harnessing webpage uis for text-rich visual understanding, 2024. 2",
        "[23] Yangzhou Liu, Yue Cao, Zhangwei Gao, Weiyun Wang, Zhe Chen, Wenhai Wang, Hao Tian, Lewei Lu, Xizhou Zhu, Tong Lu, Yu Qiao, and Jifeng Dai. Mminstruct: a high-quality multi-modal instruction tuning dataset with extensive diversity. Science China Information Sciences, 67(12), 2024. 3",
        "[24] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. MMBench: Is your multi-modal model an all-around player? In ECCV, 2024. 3, 7",
        "[25] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. OCRBench: on the hidden mystery of OCR in large multimodal models. Science China Information Sciences, 2024. 3, 5, 7",
        "[26] Ziyu Liu, Tao Chu, Yuhang Zang, Xilin Wei, Xiaoyi Dong, Pan Zhang, Zijian Liang, Yuanjun Xiong, Yu Qiao, Dahua"
      ],
      "bbox": [
        516,
        92,
        906,
        901
      ],
      "page_idx": 8
    },
    {
      "type": "page_number",
      "text": "9",
      "bbox": [
        493,
        924,
        504,
        936
      ],
      "page_idx": 8
    },
    {
      "type": "list",
      "sub_type": "ref_text",
      "list_items": [
        "Lin, et al. MMDU: A multi-turn multi-image dialog understanding benchmark and instruction-tuning dataset for lvlms. In NeurIPS Datasets and Benchmarks Track, 2024. 3",
        "[27] Renze Lou, Kai Zhang, and Wenpeng Yin. A comprehensive survey on instruction following. arXiv preprint arXiv:2303.10475, 2023. 1",
        "[28] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. 
arXiv preprint arXiv:2406.11069, 2024. 3", + "[29] Ziyang Luo, Can Xu, Pu Zhao, Qingfeng Sun, Xiubo Geng, Wenxiang Hu, Chongyang Tao, Jing Ma, Qingwei Lin, and Daxin Jiang. Wizardcoder: Empowering code large language models with evol-instruct. arXiv preprint arXiv:2306.08568, 2023. 2", + "[30] Yubo Ma, Yuhang Zang, Liangyu Chen, Meiqi Chen, Yizhu Jiao, Xinze Li, Xinyuan Lu, Ziyu Liu, Yan Ma, Xiaoyi Dong, et al. MMLongBench-Doc: Benchmarking long-context document understanding with visualizations. In NeurlPS Datasets and Benchmarks Track, 2024. 3", + "[31] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 5, 2", + "[32] OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023. Accessed: 2025-02-23. 3", + "[33] OpenAI. GPT-4V(ison) System Card. 2023. Accessed: 2025-02-23. 3", + "[34] Yusu Qian, Hanrong Ye, Jean-Philippe Fauconnier, Peter Grasch, Yinfei Yang, and Zhe Gan. MIA-Bench: Towards better instruction following evaluation of multimodal llms. In ICLR, 2025. 1, 2, 3, 5, 7", + "[35] Yiwei Qin, Kaiqiang Song, Yebowen Hu, Wenlin Yao, Sangwoo Cho, Xiaoyang Wang, Xuansheng Wu, Fei Liu, Pengfei Liu, and Dong Yu. InFoBench: Evaluating instruction following ability in large language models. arXiv preprint arXiv:2401.03601, 2024. 1, 2", + "[36] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 2, 5", + "[37] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, 2018. 
4, 2", + "[38] Lucy Xiaoyang Shi, Brian Ichter, Michael Equi, Liyiming Ke, Karl Pertsch, Quan Vuong, James Tanner, Anna Walling, Haohuan Wang, Niccolo Fusai, et al. Hi Robot: Open-ended instruction following with hierarchical vision-language-action models. arXiv preprint arXiv:2502.19417, 2025. 2", + "[39] Dingjie Song, Shunian Chen, Guiming Hardy Chen, Fei Yu, Xiang Wan, and Benyou Wang. Milebench: Benchmarking mllms in long context, 2024. 3", + "[40] Fei Wang, Xingyu Fu, James Y. Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou," + ], + "bbox": [ + 91, + 90, + 483, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kai Zhang, Tianyi Lorena Yan, Wenjie Jacky Mo, Hsiang-Hui Liu, Pan Lu, Chunyuan Li, Chaowei Xiao, Kai-Wei Chang, Dan Roth, Sheng Zhang, Hoifung Poon, and Muhao Chen. Muirbench: A comprehensive benchmark for robust multi-image understanding, 2024. 3", + "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-VL: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7, 8", + "[42] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, et al. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. 5", + "[43] Xilin Wei, Xiaoran Liu, Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Jian Tong, Haodong Duan, Qipeng Guo, Jiaqi Wang, et al. Videorope: What makes for good video rotary position embedding? arXiv preprint arXiv:2502.05173, 2025. 3", + "[44] Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, and Daxin Jiang. Wizardlm: Empowering large language models to follow complex instructions. arXiv preprint arXiv:2304.12244, 2023. 
2",
        "[45] Zhiyang Xu, Ying Shen, and Lifu Huang. Multiinstruct: Improving multi-modal zero-shot learning via instruction tuning, 2023. 3",
        "[46] Zhiyang Xu, Chao Feng, Rulin Shao, Trevor Ashby, Ying Shen, Di Jin, Yu Cheng, Qifan Wang, and Lifu Huang. Vision-flan: Scaling human-labeled tasks in visual instruction tuning, 2024. 3",
        "[47] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. MiniCPM-V: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 8",
        "[48] Kaining Ying, Fanqing Meng, Jin Wang, Zhiqian Li, Han Lin, Yue Yang, Hao Zhang, Wenbo Zhang, Yuqi Lin, Shuo Liu, Jiayi Lei, Quanfeng Lu, Runjian Chen, Peng Xu, Renrui Zhang, Haozhe Zhang, Peng Gao, Yali Wang, Yu Qiao, Ping Luo, Kaipeng Zhang, and Wenqi Shao. Mmt-bench: A comprehensive multimodal benchmark for evaluating large vision-language models towards multitask agi, 2024. 3, 7",
        "[49] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. MM-Vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 7",
        "[50] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In CVPR, 2024. 3, 7",
        "[51] Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Ziyu Liu, Shengyuan Ding, Shenxi Wu, Yubo Ma, Haodong Duan, Wenwei Zhang, et al. Internlm-xcomposer2.5-reward: A simple yet effective multi-modal reward model. arXiv preprint arXiv:2501.12368, 2025. 
3", + "[52] Yuhang Zang, Wei Li, Jun Han, Kaiyang Zhou, and" + ], + "bbox": [ + 516, + 90, + 906, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Chen Change Loy. Contextual object detection with multimodal large language models. IJCV, 2025. 3", + "[53] Tao Zhang, Yanjun Shen, Wenjing Luo, Yan Zhang, Hao Liang, Fan Yang, Mingan Lin, Yujing Qiao, Weipeng Chen, Bin Cui, et al. CFBench: A comprehensive constraints-following benchmark for llms. arXiv preprint arXiv:2408.01122, 2024. 1, 2", + "[54] Xinghua Zhang, Haiyang Yu, Cheng Fu, Fei Huang, and Yongbin Li. Iopo: Empowering llms with complex instruction following via input-output preference optimization, 2024. 2", + "[55] Xiangyu Zhao, Shengyuan Ding, Zicheng Zhang, Haian Huang, Maosong Cao, Weiyun Wang, Jiaqi Wang, Xinyu Fang, Wenhai Wang, Guangtao Zhai, et al. Omnialign-v: Towards enhanced alignment of mllms with human preference. arXiv preprint arXiv:2502.18411, 2025. 4", + "[56] Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. In NeurIPS Datasets and Benchmarks Track, 2023. 2", + "[57] Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. Instruction-following evaluation for large language models. arXiv preprint arXiv:2311.07911, 2023. 1, 2, 7", + "[58] Wangchunshu Zhou, Yuchen Eleanor Jiang, Ethan Wilcox, Ryan Cotterell, and Mrinmaya Sachan. Controlled text generation with natural language instructions. In ICML, 2023. 
2" + ], + "bbox": [ + 91, + 90, + 483, + 484 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "MM-IFEngine: Towards Multimodal Instruction Following Supplementary Material", + "text_level": 1, + "bbox": [ + 199, + 85, + 799, + 138 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A. MM-IFEval", + "text_level": 1, + "bbox": [ + 91, + 155, + 220, + 171 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A.1. An overview of Constraints and Instructions", + "text_level": 1, + "bbox": [ + 89, + 181, + 470, + 198 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A.1.1. Constraints", + "text_level": 1, + "bbox": [ + 91, + 205, + 220, + 220 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Based on daily use cases and existing research, we have identified six main categories of constraints, which can be further divided into 32 specific constraint types shown in Fig. 5. In this section, we introduce and exemplify these six major constraint categories. For detailed descriptions and examples of all 32 subcategories, please refer to Table 5.", + "bbox": [ + 89, + 226, + 480, + 316 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Text Length Requirements. In this category, we focus on the length of the response, including the number of paragraphs, sentences, and words. We also consider the length of the response in the aspect of poetry or \"Use yes or no to answer the question\". It must be noted that we do not require the model to follow the strict requirement in exact numbers like \"The response must be exactly 56 words\". 
The constraints we propose in this category are based on reality, with precise numerical requirements only at the sentence or paragraph level, and of moderate size; the rest of the constraints are used to limit by ranges like \"The response must be between 100 and 150 words\", which aligns with the task that people tend to encounter in real-world scenarios.",
    "bbox": [
      89,
      319,
      482,
      515
    ],
    "page_idx": 11
  },
  {
    "type": "text",
    "text": "Mathematical Requirements. This category includes constraints related to the most common part of answering mathematical problems like precision, scientific notation, and other mathematical requirements. For example, \"Keep two decimal places for the number in the answer\", \"Please round up all the numbers in the answer\", or \"Don't include specific numbers in your answers. Compare numbers with their relative sizes\".",
    "bbox": [
      89,
      517,
      482,
      637
    ],
    "page_idx": 11
  },
  {
    "type": "text",
    "text": "Language & Formatting Requirements. This category includes constraints related to the language and formatting of the response, such as answering in a specific language, using a specific format like JSON, or using a specific style like poetry. Requirements for tense, writing style, numbering, list, and other language-related or formatting-related aspects are also included in this category.",
    "bbox": [
      89,
      640,
      482,
      744
    ],
    "page_idx": 11
  },
  {
    "type": "text",
    "text": "Rhetoric & Logic Requirements. \"Rhetoric\" refers to the art of using language to persuade or influence, while \"Logic\" refers to the principles of reasoning and argumentation. This category includes constraints related to the rhetoric and logic of the response, such as the use of metaphor, simile, cause-and-effect relationship, conditional statement, and other rhetoric and logic-related aspects.",
    "bbox": [
      89,
      747,
      482,
      852
    ],
    "page_idx": 11
  },
  {
    "type": "text",
    "text": "Action Requirements. 
\"Action\" refers to the action that the model should take like a human. We define this category as the constraints that require the model to perform a specific", + "bbox": [ + 89, + 854, + 482, + 900 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "action, such as tone, role imitation, use specific prefix or suffix, or acting like under some specific situation. We hope this category can help us to evaluate the ability of the model to follow instructions and perform actions in more complex and realistic scenarios.", + "bbox": [ + 511, + 157, + 903, + 232 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Keyword Requirements. \"Keyword\" refers to the specific words or phrases that the model should include or avoid in the response. This category includes constraints related to the response keyword, such as the use of specific keywords, the avoidance of specific keywords, or the variation of specific keywords. For example, \"Use at least three synonyms for 'innovation,' such as 'breakthrough,' 'new approach,' or 'invention,' spread throughout your text.\"", + "bbox": [ + 511, + 234, + 906, + 354 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A.1.2. Instruction Tasks", + "text_level": 1, + "bbox": [ + 511, + 369, + 684, + 382 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For source datasets lacking original task instructions, we constructed a diverse task pool containing 18 instructions that encourage open-ended responses from models. These instructions can be categorized into five task types: Descriptive Analysis, Emotional & Perspective, Creative Writing, Social Media & Content, and Roleplay. The classification information and examples of the instructions are shown in Table 6.", + "bbox": [ + 511, + 388, + 905, + 508 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A.2. 
Perception-level Problems", + "text_level": 1, + "bbox": [ + 511, + 525, + 751, + 540 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/26f2af004dd7f5937db26a4a9177826d59f32c69e865ec08b9eeb5358d9181b2.jpg", + "image_caption": [ + "Figure 6. Image Source Distribution in perception-level problems.Perception-level problems in MM-IFEval presents a systematic categorization of 100 challenging vision-based instructionfollowing tasks, organized into 13 distinct classes according to image content characteristics and task complexity." + ], + "image_footnote": [], + "bbox": [ + 633, + 566, + 785, + 684 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Perception-level problems in MM-IFEval comprise 100 carefully crafted questions with strong image-constraint correlations. The images can be categorized into 13 information-rich and complex domains shown in Figure 6. Figures 10, 11, 12, and 13 present representative examples from the web interface, diagram, poster, and visual difference categories, respectively, demonstrating the diverse visual challenges incorporated in our benchmark.", + "bbox": [ + 511, + 779, + 908, + 900 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 501, + 934 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/6e3680bdcc5cb2321579d781eb8c241e9d5302be72393bbcf07887d1d980df2c.jpg", + "image_caption": [ + "Figure 5. Demonstration of constraints categories. We designed 6 main categories for all the constraints used, with a total of 32 subcategories" + ], + "image_footnote": [], + "bbox": [ + 181, + 88, + 823, + 260 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B. Image Sources", + "text_level": 1, + "bbox": [ + 89, + 319, + 243, + 335 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The quality of the image source is crucial for the performance of the model. 
Except of this, the diversity of the image source is also important to fully utilize or evaluate the ability of the model. We use the following image source:", + "bbox": [ + 89, + 344, + 483, + 405 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Natural Scene: The natural scene is the most common image source, which is most used in the real-world like the image of a beautiful landscape, a busy street, or a crowded cafe. In this part, we sample images from CC3M[37] and ALLaVA[3].", + "- UI Interface: The UI interface is the image from the UI interface of the website and mobile application. It is crucial because it represents a significant portion of real-world multimodal interactions where users need to understand and interact with digital interfaces. We collected diverse mobile app UI images from the RICO[9] dataset and web UI images from the MultiUI[22] dataset.", + "- Diagram & Chart: The diagram and chart are the image that contains some specific information like the data, the relationship between the data, or the change of the data. We collect diagram and chart images from ChartQA[31] dataset, which contains diverse diagram and chart images.", + "- **Mathematic:** The math problem is the image that contains a math problem, which is a common task in the real-world like the problem of the math, the solution of the math problem, or the calculation of the math problem. We collect math problem images from Geo170k[12] dataset, which contains diverse geometry problem images." + ], + "bbox": [ + 89, + 406, + 483, + 753 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C. 
MM-IFEngine Prompt Template", + "text_level": 1, + "bbox": [ + 89, + 768, + 393, + 787 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "MM-IFEngine provides a scalable pipeline for mass-producing instruction-following datasets for multimodal large language models, functioning effectively regardless of whether source datasets contain original instructions. This engine enables systematic augmentation of existing visual datasets with diverse instruction-following tasks. Figures 14 and 15 demonstrate representative prompt templates from", + "bbox": [ + 89, + 794, + 483, + 901 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "MM-IFEngine's two core components: the instruction generation module and the constraint integration module, respectively, illustrating the methodology behind our automated data construction process.", + "bbox": [ + 511, + 320, + 906, + 381 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "D. MM-IFInstruct and MM-IFDPO Dataset", + "text_level": 1, + "bbox": [ + 511, + 402, + 885, + 420 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Our MM-IFInstruct dataset integrates three distinct data sources: CC3M (without original instructions), ALLaVA (with pre-existing questions), and a diversity collection composed of MultiUI, ChartQA, and Geo170k. To create the MM-IFDPO dataset for preference optimization, we randomly removed $33\\%$ of constraints from the MM-IFInstruct samples to generate rejected examples. Figures 16, 17, and 18 illustrate representative samples derived from CC3M, ALLaVA, and our diversity collection, respectively, while Figure 19 demonstrates an example pair from the MM-IFDPO dataset showing both preferred and rejected instructions.", + "bbox": [ + 511, + 431, + 908, + 612 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "E. Evaluation", + "text_level": 1, + "bbox": [ + 513, + 636, + 633, + 651 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "E.1. 
Rule-based", + "text_level": 1, + "bbox": [ + 513, + 662, + 638, + 678 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We identified 10 constraint subcategories from our taxonomy of 32 that could be algorithmically verified. For these selected constraints, we developed specialized verification functions with targeted parameters. For efficiency, we employed large language models to analyze each constraint specification, select the most appropriate verification function, and extract the necessary parameters. All selections were subsequently validated through manual review to ensure the accuracy and quality of both the function selection and their parameters. The prompt template used for function selection and parameter extraction is illustrated in Figure 20, while Table 7 provides a comprehensive overview of all verification functions with their corresponding parameter examples.", + "bbox": [ + 511, + 688, + 908, + 900 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "E.2. Compare Judge Method", + "text_level": 1, + "bbox": [ + 94, + 90, + 313, + 107 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Recent works[11, 28] have shown that GPT-4o has the ability to compare two responses from models. For constraint types lacking objective evaluation metrics (such as tone requirements or role imitation), we implemented a comparative assessment method. This approach requires the model under evaluation to generate two responses: one adhering to the target constraint and another without the constraint. A judge model then analyzes both outputs to determine whether significant differences exist between them, thereby more accurately assessing whether the model has successfully followed these subjective constraints. 
Figure 21 illustrates the prompt used in this comparative evaluation process.", + "bbox": [ + 93, + 112, + 482, + 292 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "E.3. Direct Judge Method", + "text_level": 1, + "bbox": [ + 94, + 301, + 290, + 318 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The Direct Judge method provides the constraint and answer of the model under test directly to the Judge model, and its prompt template is shown in Figure 22.", + "bbox": [ + 93, + 324, + 482, + 369 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/da1c12581fdf4703106d135a0c117b8491e0a5df760368394ef31e1010533d9d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 132, + 114, + 326, + 255 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/0484a0868ae93c6822a2e37ad27487a45d7da1c18d389d48bfe3030a757e0da2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 444, + 103, + 491, + 140 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Instruction", + "text_level": 1, + "bbox": [ + 504, + 111, + 650, + 132 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "What might have led to the dog's behavior as depicted in this image?", + "bbox": [ + 517, + 155, + 844, + 227 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/f85c35d98213bdd15cfb04c35d6896020e317749ab4d75ff2de2cf0f5000a581.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 133, + 270, + 176, + 306 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Constraints", + "text_level": 1, + "bbox": [ + 189, + 275, + 343, + 296 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1.target Audience requirement: Your audience is a dog lover.", + "2.tense所需要的: Use present tense in the first paragraph and past tense in the second.", + 
"3.tone Requirement: Adopt a reassuring, empathetic tone as if consoling someone.", + "4.paragraph_number_limit: Your response must consist of exactly 3 paragraphs.", + "5.mention: Mention the term 'sorry' at least twice throughout your description.", + "6highlight所需要的: Use bold for the first occurrence of the term 'aggressive behavior' in each paragraph.", + "7wrap_up Requirement: Provide a final paragraph summarizing the key arguments.", + "8. perspective Requirement: Please answer the question in the second person." + ], + "bbox": [ + 132, + 313, + 805, + 446 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/5620c0d5370927e54a310681de1128ea2aee9ec77460e5d08a808103fb653489.jpg", + "image_caption": [ + "Figure 8. A compose-level problem example from the MM-IFEval benchmark in the chart image category." + ], + "image_footnote": [], + "bbox": [ + 127, + 517, + 423, + 678 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/907f08c10d6e79f5c38cd4e49277e1c617858900c1802711181bddafab3df592.jpg", + "image_caption": [ + "Figure 7. A compose-level problem example from the MM-IFEval benchmark in the general image category." + ], + "image_footnote": [], + "bbox": [ + 457, + 515, + 503, + 551 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Instruction", + "text_level": 1, + "bbox": [ + 514, + 523, + 655, + 542 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Which region has the highest value of apple production? 
Give the answer, and analyze the reasons for the large yield of apples in this area.", + "bbox": [ + 529, + 569, + 869, + 643 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/0153a9628e2e3eccb3237466825dbb02299faf3591c64102e7c65a7d3636acfa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 140, + 688, + 181, + 724 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Constraints", + "text_level": 1, + "bbox": [ + 202, + 696, + 352, + 715 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. precision: In the answer, plot the output in the same unit.", + "2.title所需要的: Provide a concise title that summarizes the main idea.", + "3. perspective Requirement: Give your answer from the perspective of a Mexican agricultural expert.", + "4.sentence_number_limit: Each paragraph should contain between 3 and 5 sentences.", + "5. unstrict_formatting REQUIREments: Number the reasons for your analysis." + ], + "bbox": [ + 132, + 739, + 846, + 849 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/6c1982e3cc10acf54c4994c66a37f7e8b2fa35ce0ebb3bd38f43899e361eddf4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 161, + 123, + 395, + 255 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/63652bdb4d2e32f585e27f8e52a240c787ad167c8c9de70c5880bade271d2159.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 467, + 114, + 511, + 150 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Instruction", + "text_level": 1, + "bbox": [ + 522, + 125, + 660, + 142 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In triangle ABC, D is the midpoint of BC, E is the midpoint of AD, and F is the midpoint of CE. 
Given that the area of triangle ABC is 28 square centimeters, consider the impact of these midpoints on the subdivisions of the triangle. Analyze how these midpoints affect the areas of triangles within triangle ABC and provide a detailed explanation to find the area of the shaded region that is formed within triangle BEC and triangle AEC. Finally, deduce and conclude which part of the interior triangles contribute to the shaded area.", + "bbox": [ + 531, + 154, + 864, + 260 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/d43f9962e8b33f145891928a51d28b11929c9878a53feea2d7123d86ff22bd5e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 135, + 268, + 176, + 303 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Constraints", + "text_level": 1, + "bbox": [ + 189, + 273, + 334, + 292 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1.target Audience requirement: Write your answer for a liberal arts student. You're tutoring her in math.", + "2(word_count_range_limit: Please write between 150 and 200 words in total.", + "3.paragraph_number_limit: Your response must consist of exactly 4 paragraphs.", + "4.sentence_number_limit: Each paragraph should contain between 3 and 5 sentences.", + "5.not Mention: Please do not mention the words 'formula' or 'equation' in your answer.", + "6.mention: Mention the word 'midpoint' at least three times throughout your description.", + "7.tone Requirement: Write your answer in a positive and encouraging tone, emphasizing the simplicity of the geometric concepts involved." + ], + "bbox": [ + 133, + 316, + 836, + 430 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/589cf867a22182e068246d12e82f9c8b33fe2623ec671b4a6092aff851dc7eff.jpg", + "image_caption": [ + "Figure 10. A perception-level problem example from the MM-IFEval benchmark in the web category." 
+ ], + "image_footnote": [], + "bbox": [ + 143, + 590, + 163, + 604 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "熱門", + "bbox": [ + 165, + 590, + 189, + 603 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/4f69995458ad65d6273503c3f1405a1ecd4111c77cb6d19c22e8f067983a182f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 612, + 171, + 633 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "BITCOIN BTC", + "bbox": [ + 174, + 614, + 218, + 633 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/7acea6432fb9022b5a33535d982ef3fcc5554b2dcf8c5fcd664b83946350ee55.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 148, + 642, + 169, + 662 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "ETHEREUM ETH", + "bbox": [ + 174, + 643, + 232, + 661 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/2ee1295d0e9b5bc3e465acbf481149629ae85cfd7d1c7d0a05b741fd31c7e9b1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 670, + 171, + 691 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "TETHER U... 
USDT", + "bbox": [ + 174, + 672, + 233, + 691 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b3c9eac6671fe2b8119bc68e7872593103ecc8f2f6902e4917136ee6093c119a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 699, + 173, + 720 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "USDC USDC", + "bbox": [ + 174, + 700, + 205, + 719 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/6297fcacd3408db5e5079c1fa07b36dd45523b97c45725cff4d124471ce24fc5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 729, + 173, + 750 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "BNB BNB", + "bbox": [ + 174, + 729, + 197, + 748 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/f621fd1a958e226377c6ec8dd2d58c6be49f6c28ae8e82474a52930ee148192d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 757, + 173, + 777 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "BUSD BUSD", + "bbox": [ + 174, + 758, + 202, + 776 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "3,156,526.95 $0.76\\%$", + "bbox": [ + 258, + 613, + 346, + 633 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "86,060.91-2.64%", + "bbox": [ + 272, + 643, + 321, + 662 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "32.83-0.03%", + "bbox": [ + 290, + 672, + 320, + 691 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "32.83 -0.01%", + "bbox": [ + 289, + 700, + 320, + 720 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "19,024.08+0.47%", + "bbox": [ + 272, + 729, + 321, + 750 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "32.890.08%", + "bbox": [ + 290, + 758, + 320, + 773 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/ec10135043baf7b83bf204b85b6f92a302788debf4131724f2feac72291fb4ab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 328, + 614, + 346, + 630 + ], + 
"page_idx": 15 + }, + { + "type": "image", + "img_path": "images/7dc04617a68fbd011d6e2e59205e61d13f5fe74159dcab09d05f33af5f01fd78.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 330, + 643, + 346, + 657 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/37403248a25ce74f0fee9561af337d76b40215268fb8a2db12df7ab41da09904.jpg", + "image_caption": [ + "Figure 9. A compose-level problem example from the MM-IFEval benchmark in the geometry image category." + ], + "image_footnote": [], + "bbox": [ + 447, + 540, + 493, + 578 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Instruction", + "text_level": 1, + "bbox": [ + 504, + 550, + 648, + 570 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "If someone just bought the orange currency for $12,000 and the blue currency for$ 15,000, what is the total amount of money they have now, based on the current currency situation? Round off the decimal part of the answer.", + "bbox": [ + 511, + 585, + 859, + 664 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/0a174d1a8487ae270fa9531eb282b12b3bdf1ebe750de4487d240fbd9bec8c31.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 444, + 696, + 493, + 732 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Ground Truth", + "text_level": 1, + "bbox": [ + 504, + 702, + 689, + 723 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "26907", + "bbox": [ + 651, + 771, + 725, + 790 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/93fae1c89257852bfbbe9a0a5b659cc8ae540c6562a41118c4f8fcb933ee185c.jpg", + "image_caption": [ + "Figure 11. A perception-level problem example from the MM-IFEval benchmark in the diagram category." 
+ ], + "image_footnote": [], + "bbox": [ + 124, + 152, + 390, + 410 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ede6d71a7e6a40c3e16bae59a649dada191f0d2c2d2d6a06b20011e6e74d99a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 455, + 140, + 501, + 176 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Instruction", + "text_level": 1, + "bbox": [ + 513, + 148, + 653, + 169 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this flowchart, which node is reached after the first condition encountered from Start is judged to be Yes? Preserve the case of node names.", + "bbox": [ + 521, + 183, + 859, + 255 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/f11a13a7625ccb9fecf6ab27dca9dc1ff2f4e8e98d758891aaf3acc5e1041833.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 455, + 292, + 503, + 328 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Ground Truth", + "text_level": 1, + "bbox": [ + 513, + 297, + 692, + 319 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "End", + "bbox": [ + 669, + 366, + 717, + 385 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/7b82cca0e2400d6ba62d0d7b0cc2be5197f5fe5e94ded82d13f2a8c8d879f3ba.jpg", + "image_caption": [ + "Figure 12. A perception-level problem example from the MM-IFEval benchmark in the poster category." + ], + "image_footnote": [], + "bbox": [ + 107, + 627, + 436, + 776 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/3996516606fd064cfdbe3369919ead6f54bcf5d8193d9d747fd5af6365ced908.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 457, + 561, + 501, + 595 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Instruction", + "text_level": 1, + "bbox": [ + 511, + 569, + 648, + 589 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Observe the alphabet represented by white dots and line segments in the figure. 
Starting from 'A', what is the second letter composed of eight white dots? Output this letter in uppercase.", + "bbox": [ + 519, + 603, + 828, + 691 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/65870911467b0cb4597e559aeff59872e5723c320fec13889cba633658b9431a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 457, + 708, + 503, + 742 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Ground Truth", + "text_level": 1, + "bbox": [ + 511, + 714, + 687, + 734 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "G", + "bbox": [ + 678, + 780, + 696, + 797 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/23657ebb87830fb6b12c608aa56b6a82daeeb3ab35f058f6a82db3d59dd9cf69.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 114, + 401, + 255 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/5d9b2a3cb57fe2986ad599fb1fc44d097fab64fa3f38ba4e5cececff287ec4ec.jpg", + "image_caption": [ + "Figure 14. Prompt template for image generation instructions using a large language model in MM-IFEngine." + ], + "image_footnote": [], + "bbox": [ + 464, + 112, + 506, + 147 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Instruction", + "text_level": 1, + "bbox": [ + 519, + 121, + 653, + 141 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Sam and Tom used the red box and Tom used the blue box. They each gave three answers. Would you please judge which of the two boys found more differences? Print the name of the winning boy directly.", + "bbox": [ + 526, + 155, + 851, + 231 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/7fc02214dfd29a49639cbccdd247b22267c5b82662bcdcc0f061ab82dc9c141f.jpg", + "image_caption": [ + "Figure 13. 
A perception-level problem example from the MM-IFEval benchmark in the finding difference category." + ], + "image_footnote": [], + "bbox": [ + 124, + 258, + 401, + 397 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/2bb7b33d20c7c67877fcd6b28c2171338819d36b4cf30b1ff80ca925a85f94b5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 462, + 261, + 508, + 296 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Ground Truth", + "text_level": 1, + "bbox": [ + 519, + 266, + 694, + 287 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Tom", + "bbox": [ + 669, + 333, + 718, + 351 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Instruction generation prompt", + "text_level": 1, + "bbox": [ + 148, + 469, + 517, + 491 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "You are an expert in generating concise instructions for images.", + "bbox": [ + 132, + 508, + 718, + 527 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "## Task", + "bbox": [ + 135, + 549, + 212, + 565 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Given the image, generate a list of appropriate instructions for it. 
Your instructions should not be too long or overly detailed, and they should not include any specific details about the image.", + "bbox": [ + 135, + 569, + 828, + 627 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "On one hand, you can choose appropriate instructions cases for the provided image from the Examples and modify them naturally for the image.", + "bbox": [ + 135, + 630, + 776, + 688 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "On the other hand, you can generate new instructions, but only if these new instructions are relevant and appropriate for the image.", + "bbox": [ + 135, + 690, + 828, + 729 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Examples", + "bbox": [ + 135, + 750, + 259, + 768 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "{original instructions list}", + "bbox": [ + 135, + 770, + 375, + 789 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "You output format should be in the following format:", + "bbox": [ + 135, + 809, + 620, + 828 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "{output format}", + "bbox": [ + 135, + 830, + 282, + 849 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Constraint integration prompt", + "text_level": 1, + "bbox": [ + 148, + 287, + 519, + 310 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You are an expert in add appropriate constraints to the instruction for images.", + "bbox": [ + 143, + 319, + 661, + 334 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Task", + "text_level": 1, + "bbox": [ + 143, + 348, + 202, + 359 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Given the original instruction, your task is to expand the instruction by adding constraints to it.", + "bbox": [ + 140, + 362, + 774, + 377 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You can select 
**as many as possible** appropriate types of constraints from the given **Constraints List** below and modify them. However, ensure that the constraints you generate meet the following requirements:", + "bbox": [ + 140, + 377, + 869, + 405 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Maintain the thematic consistency of the original instruction.", + "2.Be relevant and appropriate for the original instruction and be concise.", + "3. Do not conflict with the original instruction or with each other." + ], + "bbox": [ + 143, + 406, + 627, + 448 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "For example, if the original instruction is a simple query like \"What color is the truck?\" you should avoid adding constraints such as \"Your response must be in three paragraphs,\" as such a requirement is unnecessary for a short and simple question. Moreover, if the original instruction is a question like \"What is the object in the image?\", you should avoid adding constraints such as \"Respond in the second-person to directly address the reader,\" as it conflicts with the original instruction.", + "bbox": [ + 140, + 462, + 859, + 535 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Original Instruction {originalInstruction}", + "guess_lang": "txt", + "bbox": [ + 143, + 549, + 300, + 579 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "## Constraints List\nYour added constraints can be from the following types:\n{constraints_list_str}", + "guess_lang": "erb", + "bbox": [ + 143, + 592, + 517, + 637 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Output Format Your output should follow the format below: {output format}", + "guess_lang": "txt", + "bbox": [ + 143, + 648, + 571, + 680 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": 
"Figure 15. prompt template for integrating constraints in MM-IFEngine.", + "bbox": [ + 282, + 717, + 712, + 732 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/ea7d98cf5d46a87b51dcd4991ba87ca475604f3db9d6d6ae7d4dcf55faa82418.jpg", + "image_caption": [ + "Instruction" + ], + "image_footnote": [], + "bbox": [ + 166, + 275, + 421, + 385 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/fa246e78d5d86cd9b166edad5795dce6faf8bcadbcdfe8b5d4f19f4f8d147359.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 125, + 380, + 158, + 406 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Write a short poem that captures the hustle and bustle of city life depicted in this image.", + "bbox": [ + 158, + 416, + 442, + 445 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/7bf578d20e9149264d8eeba49452b97b318a01bb095a6f55866f3d420ba12a00.jpg", + "image_caption": [ + "Figure 16. A sample constructed by MM-IFEngine pipeline from cc3m dataset" + ], + "image_footnote": [], + "bbox": [ + 114, + 470, + 143, + 497 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Constraints", + "text_level": 1, + "bbox": [ + 156, + 474, + 241, + 488 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Write each stanza so it includes a parenthetical phrase.", + "2.Each stanza should have 4 lines.", + "3.Use bold for every noun.", + "4.Start the poem with 'In the heart of the city...' and end with '...the city never sleeps.'", + "5.Use present tense throughout the poem.", + "6.Use the third-person perspective to maintain an objective viewpoint." 
+ ], + "bbox": [ + 155, + 503, + 433, + 684 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/b27cab99c2ed7531296bfc443c19e4215ad0b0f4f75480717906d223b26bd8b5.jpg", + "image_caption": [ + "Answer" + ], + "image_footnote": [], + "bbox": [ + 482, + 292, + 531, + 321 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In the heart of the city, where the \\*\\*streets\\*\\* hum with life.", + "bbox": [ + 504, + 354, + 862, + 380 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "People rush (beneath the bright blue sky),", + "bbox": [ + 504, + 383, + 782, + 396 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Buses pass (\\*\\*tires\\*\\* whispering on the road),", + "bbox": [ + 504, + 397, + 803, + 411 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Every moment feels like a lively show.", + "bbox": [ + 504, + 411, + 751, + 425 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The \\*\\*signs\\*\\* stand tall amidst the crowd,", + "bbox": [ + 504, + 439, + 779, + 452 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "An ambulance speeds by (with urgent sound),", + "bbox": [ + 504, + 453, + 807, + 467 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Newspapers rest (in their quiet stack),", + "bbox": [ + 504, + 468, + 751, + 481 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "As footsteps weave through the urban track.", + "bbox": [ + 504, + 482, + 797, + 494 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Buildings rise, their facades grand,", + "bbox": [ + 504, + 508, + 730, + 523 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "A man walks past (across the land),", + "bbox": [ + 504, + 523, + 735, + 537 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Voices blend into ambient noise,", + "bbox": [ + 504, + 537, + 720, + 551 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Life unfolds without any poise.", + "bbox": [ + 
504, + 551, + 705, + 566 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Shoppers carry bags (full of delight),", + "bbox": [ + 504, + 580, + 743, + 594 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Sunlight dances (on the pavement's sight),", + "bbox": [ + 504, + 595, + 785, + 608 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Each face tells tales of daily grind,", + "bbox": [ + 504, + 609, + 727, + 623 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Yet spirits soar, unconfined....the city never sleeps.", + "bbox": [ + 504, + 625, + 831, + 637 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/bb4d03c21b188b7e6e948b05b0228250d985286a8306dc23cf84eb15360ad44c.jpg", + "image_caption": [ + "Instruction" + ], + "image_footnote": [], + "bbox": [ + 155, + 260, + 382, + 392 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "What treat can be baked in an oven?", + "bbox": [ + 153, + 431, + 385, + 445 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/bc9dded61ccb3724cf7534e635cf47797e703693d01074c8691e6062ff4b1053.jpg", + "image_caption": [ + "Figure 17. A sample constructed by MM-IFEngine pipeline from Allava dataset" + ], + "image_footnote": [], + "bbox": [ + 107, + 468, + 140, + 496 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Constraints", + "text_level": 1, + "bbox": [ + 151, + 472, + 235, + 484 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Write your answer for a general adult audience with limited baking experience, providing clear and simple explanations.", + "2. Write your answer in a positive and encouraging tone, making the baking process seem accessible and enjoyable.", + "3. 
Mention both 'oven' and 'baking' at least once in your response, ensuring they are used naturally and contextually.", + "4. Begin every sentence with an exclamation mark to convey excitement about baking.", + "5. Your response must consist of exactly 3 paragraphs, each focusing on a different type of treat that can be baked in an oven." + ], + "bbox": [ + 148, + 494, + 424, + 705 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/bdd9fcab21c588a102cee9f5b639a91bcdb87ca0f7f02f68691766d792155ca6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 473, + 263, + 522, + 292 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Answer", + "text_level": 1, + "bbox": [ + 524, + 273, + 580, + 286 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "!One delightful treat you can bake in an oven is cookies! Whether you prefer classic chocolate chip or something more adventurous like oatmeal raisin, baking cookies is a fun and rewarding activity. The aroma of freshly baked cookies wafting through your home will make everyone excited!", + "bbox": [ + 496, + 303, + 849, + 434 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "! Another scrumptious option for oven baking is muffins! Muffins are perfect for breakfast or as a snack, and you can customize them with various ingredients such as blueberries, bananas, or even chocolate chips. Baking muffins allows you to experiment with flavors while enjoying the satisfaction of creating something delicious from scratch.", + "bbox": [ + 496, + 435, + 854, + 566 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "! Lastly, consider baking a cake! Cakes come in countless varieties, from simple vanilla to rich chocolate, and can be decorated to suit any occasion. Baking a cake not only provides a sweet treat but also enhances your skills in mixing, measuring, and decorating. 
So grab your apron and get ready to enjoy the magic of baking in your own kitchen!", + "bbox": [ + 496, + 566, + 852, + 699 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/ef4afe7e62cd13d79634e9a4ebc580c21f65027704dad58fa5b37a5c231d52d6.jpg", + "image_caption": [ + "Figure 18. A sample constructed by MM-IFEngine pipeline from geo170k dataset" + ], + "image_footnote": [], + "bbox": [ + 212, + 257, + 334, + 359 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/ca77698c74d5a156981b000cf49d67605324d4995510c0e9d28ed9e134c00878.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 116, + 351, + 150, + 378 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Instruction", + "text_level": 1, + "bbox": [ + 153, + 362, + 233, + 373 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In the diagram, quadrilateral ABCD is inscribed in circle O with diameter BE and line AE connected to it. Given that the measure of angle BCD is twice the measure of angle BAD, and that angle DAE is equal to angle CAD, develop a comprehensive geometric proof to determine the degree measure of angle DAE.", + "bbox": [ + 151, + 380, + 433, + 492 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/f6b38cfc3886ea51093755f3f7be41388885e1728923505cc87d1cdc62a2637a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 107, + 498, + 137, + 523 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Constraints", + "text_level": 1, + "bbox": [ + 150, + 501, + 233, + 513 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "1. 
Your response must consist of exactly 3 paragraphs, each focusing on a different aspect of the proof: the initial setup, the application of theorems, and the final calculation.", + "bbox": [ + 148, + 530, + 416, + 578 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2. Each paragraph should contain between 4 and 6 sentences, ensuring a balanced and detailed explanation.", + "3.The response must be between 120 and 150 words in total, maintaining clarity and conciseness." + ], + "bbox": [ + 148, + 579, + 423, + 652 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "4. Write your answer in a clear and methodical tone, suitable for a high school geometry class.", + "bbox": [ + 148, + 652, + 413, + 678 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "5.Provide a concise title that summarizes the main idea of the proof, such as 'Geometric Proof for Angle DAE'.", + "bbox": [ + 148, + 678, + 421, + 715 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/5229e3f9be101f02820ef6c5312fbeb5ec600d9304a845c7c81adfecb6202d33.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 265, + 524, + 294 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Answer", + "text_level": 1, + "bbox": [ + 524, + 276, + 581, + 287 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "**Geometric Proof for Angle DAE** To begin our proof, we note that since quadrilateral ABCD is inscribed in circle O with BE as its diameter, angle ABE is a right angle (90 degrees) by the Inscribed Angle Theorem. This theorem states that an angle inscribed in a semicircle is always a right angle. Additionally, given that angle BCD is twice angle BAD, let's denote angle BAD as $\\backslash(x\\backslash)$ . Therefore, angle BCD measures $\\backslash(2x\\backslash)$ . $\\backslash$ n\\NNext, applying the properties of cyclic quadrilaterals, we know that opposite angles sum up to 180 degrees. 
Thus, angle BAD plus angle BCD equals 180 degrees. Substituting the values, we get $\\backslash(x + 2x = 180^{\\wedge}\\backslash\\text{circ} \\backslash)$ , leading to $\\backslash(3x = 180^{\\wedge}\\backslash\\text{circ} \\backslash)$ or $\\backslash(x = 60^{\\wedge}\\backslash\\text{circ} \\backslash)$ . Since angle DAE is equal to angle CAD, and considering triangle ADE where AD is common, angle DAE also measures 30 degrees because angle CAD is half of angle BAD due to symmetry and the given condition. Finally, calculating the exact measure of angle DAE involves recognizing that angle CAD is half of angle BAD, which was determined to be 60 degrees. Hence, angle CAD is 30 degrees. Consequently, angle DAE, being congruent to angle CAD, also measures 30 degrees. This completes our proof, demonstrating that angle DAE indeed measures 30 degrees based on the provided conditions and geometric principles applied throughout the process.", + "bbox": [ + 500, + 313, + 859, + 696 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/d5913805078e9758b3b084021463a8066cf52a8f5c88938099c0eb7aedea580d.jpg", + "image_caption": [ + "Figure 19. A DPO training set sample, where the rejected data is obtained by removing $33\\%$ of the constraints" + ], + "image_footnote": [], + "bbox": [ + 138, + 99, + 356, + 210 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/7345a749a10e1e6c186fa9f6730013ec59203937baa69a45f79edc7ecc517d25.jpg", + "image_caption": [ + "Figure 20. 
Prompt template for automated verification function selection and paramater extraction" + ], + "image_footnote": [], + "bbox": [ + 112, + 213, + 140, + 236 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "prompt", + "text_level": 1, + "bbox": [ + 148, + 223, + 199, + 234 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "What might the seabird be doing flying alone over the ocean? Respond in the third-person perspective to maintain an objective viewpoint. Write the answer in the form of a brief nature observation log entry. Each paragraph should contain between 3 and 5 sentences. Write your answer for a general adult audience with an interest in wildlife and nature. Conclude with a single sentence summarizing the likely behavior of the seabird.", + "bbox": [ + 125, + 247, + 344, + 391 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/a7b47c86c0f216460b617263eee211805158176d0cb11d14401cc8c47056be8d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 109, + 403, + 133 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Chosen", + "bbox": [ + 408, + 117, + 450, + 128 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Score:5/5", + "bbox": [ + 470, + 116, + 550, + 130 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In today's observation, a solitary seabird was noted gliding gracefully above the vast expanse of the ocean. The bird's wings were fully extended, indicating efficient use of air currents to conserve energy while searching its surroundings. Seabirds often fly alone when scouting for food sources such as schools of fish or marine organisms near the surface. This lone flight could also serve purposes like territorial patrol or simply enjoying favorable wind conditions. 
In conclusion, the seabird is most likely engaged in foraging activities.", + "bbox": [ + 382, + 148, + 622, + 373 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/5ee38ac2a4bb6d15d7ef96ab376ffb7813519d07e792a274fa6475fea2d02656.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 107, + 666, + 135 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Rejected", + "bbox": [ + 671, + 114, + 725, + 127 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Score:3/5", + "bbox": [ + 736, + 114, + 816, + 128 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "**Nature Observation Log Entry**", + "**Date:** [Insert Date]", + "**Location:** [Insert Location]" + ], + "bbox": [ + 645, + 143, + 787, + 171 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "**Observation:** A solitary seabird, possibly a seagull or pelican, is seen soaring gracefully above the vast expanse of the ocean. The bird's wings are spread wide, indicating it is likely gliding or flying at a steady pace.", + "bbox": [ + 645, + 172, + 880, + 207 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The water below appears calm, with gentle ripples on its surface, suggesting a serene environment. The bird's position and posture suggest it may be searching for food, such as fish or crustaceans, that are abundant in the ocean's depths. 
The absence of other birds or marine life in the immediate vicinity could imply that this bird is either hunting alone or simply enjoying a solitary flight over the sea.", + "bbox": [ + 645, + 208, + 880, + 279 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "\\*\\*Analysis:\\*\\* Seabirds like seagulls and pelicans are known for their ability to glide and soar on the wind currents, conserving energy while searching for food.", + "bbox": [ + 645, + 280, + 875, + 306 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The bird's choice of location, over the ocean, indicates its reliance on marine resources for sustenance. The calmness of the water suggests favorable weather conditions, which are essential for successful hunting and gliding. This observation provides insight into the key role that birds play in the formation of such environments.", + "bbox": [ + 645, + 308, + 874, + 354 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "highlighting their adaptability to oceanic environments and their role in the marine ecosystem.", + "bbox": [ + 645, + 361, + 875, + 378 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Prompt to choose verified function and extract params", + "text_level": 1, + "bbox": [ + 127, + 467, + 645, + 486 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Please analyze the following constraint and select the most appropriate function from the given list to verify this constraint. 
Then extract the required parameters for the verification function from the constraint.", + "bbox": [ + 145, + 520, + 839, + 571 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Constraint content: {constraint value}", + "bbox": [ + 145, + 590, + 454, + 609 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Available verification functions: {all candidate validation function names and parameters}", + "bbox": [ + 145, + 626, + 612, + 662 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Please complete the analysis following these steps:", + "bbox": [ + 145, + 678, + 558, + 695 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "**Your task:**", + "bbox": [ + 148, + 696, + 263, + 710 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Select the most appropriate verification function from the above list (return empty if none is suitable)", + "2. Extract the required parameters from the constraint based on the function description" + ], + "bbox": [ + 145, + 714, + 849, + 782 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "**Please return the result in JSON format as follows:** {output format}", + "bbox": [ + 145, + 801, + 584, + 837 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Compare Judge Prompt", + "text_level": 1, + "bbox": [ + 158, + 292, + 450, + 315 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "You are an expert in judging whether the response follows the given constraint. Your task is to assess whether the model's response satisfies the given constraint and return True or False. I will provide you with the constraint and the model's response under this constraint. 
To assist with your evaluation, I will also provide you with the model's response to the same question without the constraint.", + "bbox": [ + 143, + 334, + 841, + 417 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Constraint: {constraint}", + "bbox": [ + 143, + 419, + 334, + 435 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Response under the constraint: {pred_with Constraint}", + "bbox": [ + 143, + 436, + 575, + 452 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Response without the constraint: {pred Without constraint}", + "bbox": [ + 143, + 453, + 614, + 469 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "**Please follow the steps below to evaluate**:", + "bbox": [ + 143, + 486, + 501, + 502 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Step 1. Compare the model's response under the constraint with its response without the constraint. If you believe these two answers are very similar, it means the model has not fully considered the impact of the constraint on the answer. Please return False.", + "Step 2. Compare the model's response under the constraint with the content of the constraint. If you believe the model's response does not meet the requirements specified in the constraint, return False. Otherwise, if the response effectively satisfies the constraint, return True." + ], + "bbox": [ + 143, + 503, + 857, + 619 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "**Response Format**: Your answer should only include \"True\" or \"False\", and no additional text.", + "bbox": [ + 143, + 621, + 771, + 654 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Figure 21. 
Prompt template for Compare Judge Method", + "bbox": [ + 331, + 717, + 663, + 732 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Direct Judge Prompt", + "text_level": 1, + "bbox": [ + 138, + 294, + 392, + 316 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Your task is to evaluate whether the response from an AI assistant adheres to all of the given constraints.", + "bbox": [ + 138, + 338, + 836, + 373 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Please follow the requirements below to make the judgment:", + "bbox": [ + 140, + 375, + 632, + 391 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Be strict and consistent in your assessment.", + "2. You should refer to the content of image to make the judgment.", + "3. For one constraint, if the response fails to fully meet the constraint, give it a score of 0." + ], + "bbox": [ + 140, + 393, + 849, + 443 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Otherwise, give it a score of 1. ", + "bbox": [ + 140, + 445, + 550, + 460 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "{prediction}", + "bbox": [ + 140, + 463, + 240, + 479 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "bbox": [ + 140, + 481, + 303, + 496 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "bbox": [ + 140, + 498, + 349, + 512 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "{constraints_str}", + "bbox": [ + 140, + 515, + 277, + 531 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "bbox": [ + 140, + 532, + 336, + 547 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "You should judge and explain for each constraint in the constraint list without omitting any constraint. 
Finally, list scores of all the constraints in one sentence.", + "bbox": [ + 140, + 550, + 839, + 583 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "You should strictly follow the format below:", + "bbox": [ + 140, + 585, + 493, + 599 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Judgement: ...", + "bbox": [ + 140, + 603, + 251, + 618 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Summary: Score of constraint_1: x/1, Score of constraint_2: x/1, Score of constraint_3: x/1, ..., Score of constraint_n: x/1.", + "bbox": [ + 140, + 619, + 831, + 654 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Figure 22. Prompt template for Direct Judge Method", + "bbox": [ + 341, + 710, + 655, + 724 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 491, + 924, + 506, + 935 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/9869dc9133a474107a7d86c90dc56f607a78d7c877ecf631366616bdfd7852f9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Main ClassSubclassEvaluationDescriptionExample
A. Rhetoric & LogicA.1 Rhetoric requirementsCompare JudgeConstraint that requires the response to use a specific rhetorical technique.“Your output should include a metaphor.”
A.2 Logical relationDirect JudgeConstraint that ensures logical cohesion within the response by requiring specific logical connectors or structures.“Each paragraph must contain at least one cause-and-effect relationship.”
B. Format limitB.1 Natural languageDirect JudgeConstraint specifying which natural language(s) should be used in the response.“Please answer in Spanish.”
B.2 Part of speechDirect JudgeConstraint that requires the response to use a specific part of speech.“Use at least three adjectives in your response.”
B.3 Sentence structureDirect JudgeConstraint that specifies special sentence structures to be used in the response.“Write each sentence so it includes a parenthetical phrase.”
B.4 Tense requirementsDirect JudgeConstraint that specifies the use of multiple tenses within the response.“In past tense totally.”
B.5 PunctuationRule-baseConstraint specifying unconventional yet feasible punctuation usage in the response.“Replace all periods with semicolons.”
B.6 HighlightDirect JudgeConstraint that specifies a unique but manageable method for highlighting text.“Use **bold** for every noun.”
B.7 Title requirementsDirect JudgeConstraint that specifies how titles should be added to the response.“Provide a concise title that summarizes the main idea.”
B.8 Style requirementsCompare JudgeConstraint that specifies an unconventional or distinctive writing style for the response.“Write the answer in the form of a brief detective story.”
B.9 Case requirementsDirect JudgeConstraint specifying an unusual yet readable approach to letter case in the response.“Write all nouns in UPPERCASE and all adjectives in lowercase.”
B.10 Unstrict formatDirect JudgeConstraint specifying a unique format for the output while keeping it approachable.“Format your response as a short play script with speaker labels.”
B.11 Strict formatDirect JudgeConstraint that requires the response to follow a strictly defined format.“Please provide the output as well-formed XML with custom tags.”
B.12 Number and ListDirect JudgeConstraint for using numbered or bulleted lists in the response.“Present all key points as a numbered list with bulleted sub-lists.”
B.13 Wrap upDirect JudgeConstraint that requires a concise, well-structured summary or conclusion.“Provide a final paragraph summarizing the key arguments.”
B.14 First letterDirect JudgeConstraint specifying a pattern for the first letters of sentences or paragraphs.“Each sentence should begin with a letter that progresses through the alphabet.”
C. Text Length limitC.1 Paragraph limitRule-baseConstraint that specifies the number of paragraphs in the response.“Your response must consist of exactly 4 paragraphs.”
C.2 Sentence limitRule-baseConstraint that specifies the number of sentences in each paragraph.“Totally use 5 sentences in your response.”
C.3 Word limitRule-baseConstraint that specifies a small range for the total number of words in the text.“Your response must be a single word or phrase.”
D. Math limitD.1 PrecisionRule-baseConstraint that specifies the level of precision required in mathematical calculations.“Keep two decimal places for all numbers in the answer.”
D.2 Scientific notationRule-baseConstraint that requires the use of scientific notation for large or small numbers.“Express all numbers greater than 1,000 in scientific notation.”
E. Action limitE.1 Role imitationCompare JudgeConstraint requiring the response to imitate the tone and style of a specific role or public figure.“Please answer in the style of a sports commentator.”
E.2 Prefix and SuffixRule-baseConstraint that requires the response to begin or end with a specific phrase or symbol.“Please start your answer with ‘Once upon a time...’”
E.3 Tone requirementCompare JudgeConstraint specifying an emotional tone for the response.“Write your answer in a positive and encouraging tone.”
E.4 PerspectiveDirect JudgeConstraint that specifies a narrative perspective for the response.“Write your answer in the first-person singular as a personal account.”
E.5 Target audienceCompare JudgeConstraint requiring the response to be tailored for a specific audience.“Craft your response as if explaining to high school students.”
E.6 SituationCompare JudgeConstraint requiring the response to be set in a specific situation or scenario.“Answer as if you are giving safety instructions before a flight.”
E.7 Prior conditionDirect JudgeConstraint stating that when a specific condition is met, the response must follow a particular process.“If the user requests legal advice, begin with a disclaimer.”
F. KeywordF.1 MentionRule-base & Direct JudgeConstraint that requires including a specific keyword a certain number of times.“Mention ‘GreenTech’ exactly three times throughout.”
F.2 Not mentionRule-base & Direct JudgeConstraint that requires avoiding specific keywords or phrases.“Do not mention the words ‘budget’ or ‘investment’.”
F.3 Multiple mentionRule-base & Direct JudgeConstraint requiring including multiple specified keywords in a balanced manner.“Mention both ‘sustainability’ and ‘renewable energy’ at least twice.”
F.4 Keyword variationDirect JudgeConstraint requiring the use of synonyms or variations of a given keyword.“Use at least three synonyms for ‘innovation’ throughout your text.”
", + "bbox": [ + 199, + 223, + 795, + 736 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Table 5. Constraint Categories and Evaluation Methods for MM-IFEval", + "bbox": [ + 285, + 747, + 709, + 760 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 491, + 924, + 506, + 935 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/a0eaad4b1104e3743ad2ff61115c055cb525b491b47d3414f47134eaa66179b5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryInstruction
Descriptive AnalysisDescribe the animal's typical habitat, diet, and one unique behavioral trait.
Provide a detailed analysis of the image, including the setting, characters, and notable objects.
Explain the activity taking place in the image.
Describe the activities of the person on the left in the image.
Emotional & PerspectiveWhat emotions do you think the person in this image might be feeling?
Imagine you are the person on the left in the scene depicted in this image, write a story about what you would do next.
Personify the sign in the image and express its feelings about the rule it presents.
Creative WritingCreate a short conversation between any two individuals in the scene.
Pretend this snapshot belongs to a larger story. Write a quick paragraph setting up the next plot twist.
Use this picture as your muse. Craft a brief poem—any style—that captures the emotion you sense.
Turn this scene into a short children's story focusing on wonder and curiosity.
Write a short poem with two stanzas, inspired by the emotion or content depicted in this image.
Social Media & ContentAssume this is an image you are about to post on Twitter. Please provide a short, upbeat caption describing it.
Assume you are creating a Pinterest pin with this image. Write a short inspirational or motivational caption to accompany it.
If this image were promoting an upcoming event, compose a quick announcement with the date, a highlight of what to expect, and a call-to-action.
Role PlayImagine you are the photographer who took this picture. Briefly explain why you chose to capture this particular moment and what story you hope it conveys.
", + "bbox": [ + 217, + 323, + 779, + 636 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Table 6. Task Pool for MM-IFEngine", + "bbox": [ + 385, + 648, + 609, + 661 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/746967d3754041f1cfef9cc64a9093ae1f7ea58de21ff7208c444192c75c151d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Verified Function NameFunction ParametersConstraint ExampleParameter Example
check Whether\\_responseParagraph\\_number_in_rangelower_bound:int,upper_bound:intThe number of text paragraphs be at least 3[3, 10000]
check Whether\\_response\\_sentence\\_number_in_rangelower_bound:int,upper_bound:intThe number of sentences be exactly 3[3, 3]
check Whether\\_each\\_paragraph\\_sentence\\_number_in_rangelower_bound:int,upper_bound:intThe number of sentences in each paragraph be less than 3[0, 2]
check Whether\\_each\\_paragraph\\_sentence\\_number_in_range_listranges:List[tuple]The number of sentences in the first paragraph be exactly 3, and in the second paragraph be at most 2[(3, 3), (1, 2)]
check Whether\\_each\\_paragraph\\_sentence\\_number_exceedsexceed_num:int,upper_bound:intEach new paragraph should have 1 sentence more than the previous one, no paragraph exceeds 7 sentences[1, 7]
check Whether\\_response_word_count_in_rangelower_bound:int,upper_bound:intThe number of words should be between 50 and 80[50, 80]
check Whether\\_each\\_paragraph\\_word_count_in_rangelower_bound:int,upper_bound:intThe number of words in each paragraph should be between 50 and 80[50, 80]
check Whether\\_each\\_paragraph\\_word_count_in_range_listranges:List[tuple]The number of words in the first paragraph be between 20 and 30, in the second between 50 and 80[(20, 30), (50, 80)]
check Whether\\_whole\\_response_notContain_certain_substringsubstring:strThe response should not contain the word "apple"["apple"]
check Whether\\_whole\\_response_notContain_certain_substringssubstrings:List[str]The response should not contain the words "apple" and "banana"[["apple", "banana"]]
check Whether\\_each\\_sentence_begin_with_certain_substringsubstring:strEach sentence should start with exclamation point["!"]
check Whether\\_each\\_sentence_end_with_certain_substringsubstring:strEach sentence should end with "apple"["apple"]}
check Whether\\_whole\\_response_begin_with_certain_substringsubstring:strThe response should start with "apple"["apple"]}
check Whether\\_whole\\_response_end_with_certain_substringsubstring:strThe response should end with "apple"["apple"]}
check Whether\\_keywords_metioned_in_rangekeywords:List[str], lower_bound(times:int, upper_bound(times):intThe response should mention the word "apple" at least 3 times[["apple"], 3, 10000]
check_number_precision_in_responseprecision:intThe numbers in the response should have 2 decimal places[2]
check Whether has no\\_number_in_response-The response should not contain any number[]
check Scientific_notation\\_precision_in_responsesignificantDigits:intThe numbers in the response should have 3 significant digits[3]
", + "bbox": [ + 165, + 224, + 831, + 733 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Table 7. Verification Functions for rule-based evaluation method in MM-IFEval", + "bbox": [ + 261, + 744, + 733, + 756 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 491, + 924, + 506, + 936 + ], + "page_idx": 27 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_model.json b/data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e659efaf5c2d30d34c9a65553a8bc0a156b4b375 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_model.json @@ -0,0 +1,5877 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.2, + 0.131, + 0.8, + 0.154 + ], + "angle": 0, + "content": "MM-IFEngine: Towards Multimodal Instruction Following" + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.179, + 0.877, + 0.253 + ], + "angle": 0, + "content": "Shengyuan Ding\\(^{1,2*}\\), Shenxi Wu\\(^{1,2*}\\), Xiangyu Zhao\\(^{2,3}\\), Yuhang Zang\\(^{2\\boxtimes}\\), Haodong Duan\\(^{2}\\), Xiaoyi Dong\\(^{2}\\), Pan Zhang\\(^{2}\\), Yuhang Cao\\(^{2}\\), Dahua Lin\\(^{2,4,5}\\), Jiaqi Wang\\(^{2,6\\boxtimes}\\) \n\\(^{1}\\)Fudan University \\(^{2}\\)Shanghai AI Laboratory \\(^{3}\\)Shanghai Jiaotong University \\(^{4}\\)The Chinese University of Hong Kong \\(^{5}\\)CPII under InnoHK \\(^{6}\\)Shanghai Innovation Institute" + }, + { + "type": "image_caption", + "bbox": [ + 0.104, + 0.272, + 0.258, + 0.285 + ], + "angle": 0, + "content": "(a) Current MMIF Bench" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.289, + 0.27, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.279, + 0.274, + 0.35, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.279, + 0.317, + 0.345, + 0.339 + ], + "angle": 0, 
+ "content": "Various & Abundant" + }, + { + "type": "image_caption", + "bbox": [ + 0.276, + 0.342, + 0.347, + 0.351 + ], + "angle": 0, + "content": "Constraints" + }, + { + "type": "image", + "bbox": [ + 0.354, + 0.275, + 0.498, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.348, + 0.428, + 0.358 + ], + "angle": 0, + "content": "MTA-Bench (About 1k constraints)" + }, + { + "type": "image_caption", + "bbox": [ + 0.404, + 0.351, + 0.484, + 0.359 + ], + "angle": 0, + "content": "(300 questions)" + }, + { + "type": "image_caption", + "bbox": [ + 0.397, + 0.365, + 0.583, + 0.38 + ], + "angle": 0, + "content": "(b) MM-IFEval Benchmark" + }, + { + "type": "image", + "bbox": [ + 0.442, + 0.376, + 0.479, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.413, + 0.399, + 0.485, + 0.406 + ], + "angle": 0, + "content": "follow instruction" + }, + { + "type": "text", + "bbox": [ + 0.384, + 0.407, + 0.483, + 0.448 + ], + "angle": 0, + "content": "To Say \nyou are the musician \nimage. Write about your \ns and feelings while \ning." + }, + { + "type": "image", + "bbox": [ + 0.281, + 0.394, + 0.34, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.291, + 0.445, + 0.333, + 0.454 + ], + "angle": 0, + "content": "Constraints" + }, + { + "type": "image_caption", + "bbox": [ + 0.286, + 0.461, + 0.465, + 0.476 + ], + "angle": 0, + "content": "1. Answer as if you are facing to the audience. \n2. Use No more than 60 words...." 
+ }, + { + "type": "image", + "bbox": [ + 0.511, + 0.273, + 0.536, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.54, + 0.274, + 0.708, + 0.288 + ], + "angle": 0, + "content": "32 Categories of Constraints" + }, + { + "type": "image_caption", + "bbox": [ + 0.54, + 0.296, + 0.686, + 0.31 + ], + "angle": 0, + "content": "5.1 Average constraints" + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.316, + 0.536, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.54, + 0.32, + 0.709, + 0.334 + ], + "angle": 0, + "content": "3 Evaluation metrics combined" + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.342, + 0.536, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.541, + 0.345, + 0.689, + 0.359 + ], + "angle": 0, + "content": "400 high-quality samples" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.361, + 0.629, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.376, + 0.686, + 0.391 + ], + "angle": 0, + "content": "100 Perception-Level" + }, + { + "type": "image_caption", + "bbox": [ + 0.594, + 0.395, + 0.637, + 0.402 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.404, + 0.632, + 0.455 + ], + "angle": 0, + "content": "To See letters can you identify covering the right half e poster? 
Output in from top to bottom and o right separated with '" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.463, + 0.614, + 0.475 + ], + "angle": 0, + "content": "R,e,a,d,i,n,a,f,u" + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.394, + 0.701, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.649, + 0.449, + 0.699, + 0.456 + ], + "angle": 0, + "content": "Ground Truth" + }, + { + "type": "image_caption", + "bbox": [ + 0.729, + 0.273, + 0.859, + 0.299 + ], + "angle": 0, + "content": "(c) MM-IF Dataset SFT & DPO" + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.307, + 0.834, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.836, + 0.309, + 0.88, + 0.335 + ], + "angle": 0, + "content": "23kData generated with our MMTE Engine" + }, + { + "type": "image", + "bbox": [ + 0.851, + 0.337, + 0.88, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.734, + 0.363, + 0.867, + 0.377 + ], + "angle": 0, + "content": "Compose a brief poem inspired by the cozy and serene." + }, + { + "type": "image_caption", + "bbox": [ + 0.735, + 0.378, + 0.875, + 0.398 + ], + "angle": 0, + "content": "Each stanza should have 4 lines. Your output should include a metaphor" + }, + { + "type": "image", + "bbox": [ + 0.711, + 0.409, + 0.735, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.74, + 0.407, + 0.874, + 0.438 + ], + "angle": 0, + "content": "In a room where light gently plays, \nA haven carved from nature's ways. \nThe river whispers calm and clear, \nSerene as thoughts that banish fear..." + }, + { + "type": "image", + "bbox": [ + 0.714, + 0.449, + 0.735, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.743, + 0.443, + 0.853, + 0.457 + ], + "angle": 0, + "content": "In a world of hustle and bustle \nA haven of peace and solitude." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.744, + 0.464, + 0.865, + 0.479 + ], + "angle": 0, + "content": "Soft curtains dance in the breeze. As the sun's rays gently caress." + }, + { + "type": "image", + "bbox": [ + 0.878, + 0.413, + 0.898, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.878, + 0.451, + 0.898, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.496, + 0.907, + 0.566 + ], + "angle": 0, + "content": "Figure 1. (a) Limitations of existing Multimodal Instruction Following (IF) benchmarks. (b) Overview of the MM-IFEval benchmark, which significantly surpasses existing benchmarks in terms of constraint diversity, quantity, and instruction complexity. Our benchmark consists of Compose-Level (C-Level) problems that impose constraints on model outputs (e.g., format requirements, keyword limits) and Perception-Level (P-Level) problems that require reasoning about specific visual elements in images. (c) Our MM-IFEngine generates a large-scale, diverse training dataset suitable for both Supervised Fine-Tuning (SFT) and Direct Preference Optimization (DPO)." + }, + { + "type": "title", + "bbox": [ + 0.25, + 0.594, + 0.326, + 0.608 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.628, + 0.486, + 0.87 + ], + "angle": 0, + "content": "The Instruction Following (IF) ability measures how well Multi-modal Large Language Models (MLLMs) understand exactly what users are telling them and whether they are doing it right. Existing multimodal instruction following training data is scarce, the benchmarks are simple with atomic instructions, and the evaluation strategies are imprecise for tasks demanding exact output constraints. To address this, we present MM-IFEngine, an effective pipeline to generate high-quality image-instruction pairs. 
Our MM-IFEngine pipeline yields large-scale, diverse, and high-quality training data MM-IFInstruct-23k, which is suitable for Supervised Fine-Tuning (SFT) and extended as MM-IFDPO-23k for Direct Preference Optimization (DPO). We further introduce MM-IFEval, a challenging and diverse multi-modal instruction-following benchmark that includes (1) both compose-level constraints for output re" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.581, + 0.909, + 0.732 + ], + "angle": 0, + "content": "sponses and perception-level constraints tied to the input images, and (2) a comprehensive evaluation pipeline incorporating both rule-based assessment and judge model. We conduct SFT and DPO experiments and demonstrate that fine-tuning MLLMs on MM-IFInstruct-23k and MM-IFDPO-23k achieves notable gains on various IF benchmarks, such as MM-IFEval \\((+10.2\\%)\\), MIA \\((+7.6\\%)\\), and IFEval \\((+12.3\\%)\\). We have fully open-sourced the datasets (both SFT and DPO), evaluation code and training scripts at https://github.com/SYuan03/MM-IFEngine." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.768, + 0.644, + 0.782 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Instruction Following (IF) is a fundamental ability in Large Language Models (LLMs) [14, 27, 35, 53, 57] and Multimodal Large Language Models (MLLMs) [2, 34], which involves accurately interpreting and executing user-provided instructions. This ability is crucial for deploying models in real-world applications where users expect precise and context-aware responses, such as code" + }, + { + "type": "page_footnote", + "bbox": [ + 0.113, + 0.887, + 0.371, + 0.901 + ], + "angle": 0, + "content": "* Equal contribution.☑ Corresponding authors." 
+ }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.264, + 0.059, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.07957v2 [cs.CV] 27 Apr 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.483, + 0.213 + ], + "angle": 0, + "content": "generation [44], visual question answering [17], robots [38], and creative content creation [58]. For instance, in a VQA scenario, when a user asks an MLLM what is the object and how do I use it, return the object name and the usage instructions in a JSON format, accurate IF ensures the model provides a response like {object': 'hammer', 'usage': 'use it to drive nails'} instead of the plain text." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.215, + 0.487, + 0.472 + ], + "angle": 0, + "content": "Achieving precise IF in multimodal, diverse, and open-ended environments presents significant challenges for both model training and benchmark evaluation. One significant limitation is the scarcity of high-quality IF training data to train open-source MLLMs. In addition, current multimodal IF benchmarks [2, 34] merely have simple, atomic instructions, and the constraints are weakly correlated with visual content (see Fig. 1 (a)). Consequently, existing benchmarks lack the diversity required for real-world applications, leading to saturated results where nearly all models achieve over \\(80\\%\\). Furthermore, the evaluation method in existing benchmarks often relies on LLM-as-a-judge [56], which is imprecise for instructions demanding exact output constraints, such as word counts. Therefore, the combination of limited training data, simple benchmarks, and imprecise evaluation strategy strongly restricts the progress of current MLLMs in IF." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.476, + 0.487, + 0.747 + ], + "angle": 0, + "content": "To address the lack of high-quality IF training data and challenging benchmarks, we propose MM-IFEngine, an effective pipeline for generating high-quality image-instruction pairs. MM-IFEngine collects diverse image sources, including natural scenes, UI interfaces, diagrams, charts, and mathematical problems. We then employ a structured approach using a predefined set of 16 task descriptions and 32 constraints to guide the LLM in crafting tailored instructions for each image. Using MM-IFEngine, we generated a comprehensive dataset of image-instruction pairs, collected responses from open-source MLLMs, and applied rigorous post-processing to retain only high-quality instruction-answer pairs, thus constructing MM-IFInstruct-23k for Supervised Fine-Tuning (SFT). We also generate negative responses by selectively removing constraints from the original data, constructing the preference dataset MM-IFDPO-23k for preference optimization algorithms such as Direct Preference Optimization (DPO) [36]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.487, + 0.902 + ], + "angle": 0, + "content": "To facilitate the evaluation of multimodal IF, we present MM-IFEval, a benchmark comprising 400 challenging problems with diverse compose-level and perception-level instructions. MM-IFEval is derived from the images and instructions generated by MM-IFEngine with human-labeled annotations. As presented in Fig. 1 (b), our MM-IFEval has the following three distinctive features: (1) Diverse Instruction Types: MM-IFEval has 32 distinct constraints, ensuring a wide range of instruction complexities and surpassing the scope of prior benchmarks. (2) Hybrid Evaluation: we use" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.909, + 0.257 + ], + "angle": 0, + "content": "a hybrid strategy including both rule-based verification and judge model. 
For subjective instructions (e.g., mimicking tone), we design a comparative judgment for precise evaluation. Specifically, a control output is generated without the constraint, and the LLM judge compares both outputs for precise evaluation. (3) Challenging: the leading proprietary model (GPT-4o at \\(64.6\\%\\) ) and open-source model (Qwen2-VL-72B at \\(50.8\\%\\) ) demonstrating substantial room for improvement on our benchmark, highlights a significant opportunity for improvement in multimodal instruction following." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.258, + 0.909, + 0.394 + ], + "angle": 0, + "content": "We further demonstrate that fine-tuning MLLMs on either MM-IFInstruct-23k or MM-IFDPO-23k consistently boosts the performance of MLLMs on instruction following benchmarks, without compromising their original capabilities on other Visual Question Answering (VQA) benchmarks. Specifically, fine-tuning Qwen2-VL-7B on MM-IFDPO-23k with the DPO results in performance gains of \\(10.2\\%\\), \\(7.6\\%\\), and \\(12.3\\%\\) on MM-IFInstruct-23k, MIA-Bench [34], and IFEval [57], respectively." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.394, + 0.911, + 0.56 + ], + "angle": 0, + "content": "Our contributions include: (1) a MM-IFEngine pipeline for generating multimodal constraint-rich image-instruction pairs; (2) a large-scale training dataset MM-IFInstruct-23k and preference optimization dataset MM-IFDPO-23k derived from MM-IFEngine; (3) a challenging multimodal instruction following benchmark MM-IFEval with diverse constraints and comprehensive evaluation approaches; and (4) empirical evidence showing significant performance gains on both our MM-IFEval and existing benchmarks when training MLLMs on MM-IFInstruct-23k via SFT and MM-IFDPO-23k via DPO." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.573, + 0.657, + 0.59 + ], + "angle": 0, + "content": "2. 
Related Work" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.599, + 0.909, + 0.884 + ], + "angle": 0, + "content": "Instruction Following in LLMs. Various benchmarks and training approaches have been proposed to make Large Language Models (LLMs) better align with human instructions. While existing Instruction Following (IF) benchmarks like [14, 35, 53, 57] all aim to evaluate instruction following, they differ significantly in their dataset construction pipelines, driven by their unique constraint taxonomies. CFBench [53], for instance, constructs its dataset using a combination of taxonomic and statistical methodologies to establish comprehensive constraints. This divergence extends to their evaluation strategies. For example, InFoBench [35] adopts a strategy of decomposing complex instructions into simpler assessment standards. Beyond benchmarks, various training approaches aim to enhance LLMs' instruction-following capabilities [29, 44], including in-context learning [58] and preference optimization [54]. However, he aforementioned research is limited to the text modality, whereas our work focuses on multi-modal instruction following with vision inputs." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.886, + 0.909, + 0.901 + ], + "angle": 0, + "content": "Instruction Following Benchmarks in MLLMs. Numerical" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.506, + 0.937 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.092, + 0.331, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.092, + 0.603, + 0.437 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.609, + 0.092, + 0.904, + 0.437 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.453, + 0.907, + 0.525 + ], + "angle": 0, + "content": "Figure 2. Overall pipeline of MM-IFEngine. 
Part (a) demonstrates the three-stage workflow of our engine: (1) Image filter; (2) Task generation using GPT-4o for images without QA pairs and instruct refinement for existing annotations; and (3) Constraints integration incorporating 6 main categories and 32 subcategories, ensuring compatibility between constraints and tasks. MM-IFEngine is employed to generate SFT and DPO training datasets and MM-IFEval benchmark, as shown in part (b) and (c). MM-IFEval implements three evaluation metrics combining rule-based verification functions and a judge model to ensure accurate assessment." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.55, + 0.486, + 0.746 + ], + "angle": 0, + "content": "ous benchmarks [18] have been proposed to evaluate diverse capabilities of Multi-modal Large Language Models (MLLMs), including general knowledge [5, 24, 48, 50], document understanding [15, 25, 30], perception [43, 52], multi-image comprehension [26, 39, 40], and instruction following (IF) [2, 34]. MIA-Bench [34] and VisIT-Bench [2] are representative IF benchmarks that employ GPT-4 [32] for question generation and evaluation. In contrast to existing IF benchmarks, our MM-IFEval introduces significant improvements in diversity (32 constraint categories covering compositional and perceptual aspects), difficulty (averaging 5.1 constraints per question), and evaluation precision (using both judge models and rule-based verification)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.486, + 0.902 + ], + "angle": 0, + "content": "Instruction Tuning Data for MLLMs. Recent advancements in multi-modal instruction tuning data aim to improve cross-modal alignment and increase the variety of tasks handled by MLLMs [4, 8, 20, 26, 45, 46, 51]. For example, some previous works [3, 4, 23] build synthetic instruction tuning data generated using GPT-4V [33], enabling open-source MLLMs to achieve performance comparable to proprietary models across multiple benchmarks. 
However, existing instruction tuning data are mainly designed for general knowledge or visual perception, and data for" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.55, + 0.907, + 0.596 + ], + "angle": 0, + "content": "improving the IF abilities is scarce. The scarcity of training data for enhancing IF abilities motivated the development of our MM-IFEngine pipeline." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.612, + 0.663, + 0.63 + ], + "angle": 0, + "content": "3. MM-IFEngine" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.639, + 0.909, + 0.837 + ], + "angle": 0, + "content": "We employ the MM-IFEngine pipeline to generate image-instruction pairs, which are the foundation for creating instruction tuning data and our benchmark. As shown in Fig. 2 (a), the pipeline is composed of three main stages: (1) image filtering, where we systematically select a diverse set of images from multiple sources to ensure broad coverage of visual content; (2) task generation, in which we either synthesize novel tasks tailored to the selected images or refine existing instruction templates to better align with the image content; and (3) constraint integration, where high-quality, constraint-aware instructions are generated for images that initially lack associated annotated guidance, thereby enhancing the richness and precision of the dataset." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.848, + 0.646, + 0.865 + ], + "angle": 0, + "content": "3.1. Image Filter" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.907, + 0.903 + ], + "angle": 0, + "content": "Our image filtering strategy selects only high-quality images by removing those with low resolution or limited semantic" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.211 + ], + "angle": 0, + "content": "richness. 
For unannotated pure image datasets (e.g., CC3M [37]), we prioritize natural scene images. Rich semantic content in these images enables the creation of more comprehensive and insightful QA pairs, which is crucial for designing diverse and complex instruction following tasks. We use the IC9600 and RAM metric proposed in the previous method [55] to select the images that have rich semantic content." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.212, + 0.482, + 0.318 + ], + "angle": 0, + "content": "Furthermore, we analyze existing annotated datasets, such as ALLaVA [3]. Our analysis reveals that some images suffer from low resolution, making them inadequate for the instruction-following task. Given our intention to design more intricate and varied instruction following tasks based on this data, we filter out data items containing low-quality images." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.328, + 0.255, + 0.342 + ], + "angle": 0, + "content": "3.2. Task Generation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.349, + 0.483, + 0.515 + ], + "angle": 0, + "content": "Image Source without Original QA Pairs. For image datasets lacking original annotated task instructions (e.g., CC3M [37]), we first design appropriate task instructions for the data items. We first develop a series of task instructions tailored to the data items. These instructions are crafted to elicit long-form responses that can be subsequently modified or refined using various constraints, for instance, Provide a detailed analysis of the image, including the setting, characters, and notable objects. The final task pool \\(\\mathcal{P}_T\\) comprises a total of 16 distinct tasks, with further details available in Appendix A.1.2." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.516, + 0.484, + 0.591 + ], + "angle": 0, + "content": "Given the task pool \\(\\mathcal{P}_T\\), we randomly select \\(k\\) tasks as examples of task types for each image \\(I\\). 
We then prompt a powerful language model \\(\\mathcal{M}\\) (e.g., GPT-4o) to generate an appropriate task list \\(T_l\\) that aligns with the image content. The process is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.223, + 0.606, + 0.483, + 0.623 + ], + "angle": 0, + "content": "\\[\n\\left\\{T _ {l} ^ {*} \\right\\} = \\mathcal {M} \\left(I, T _ {e}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.629, + 0.483, + 0.749 + ], + "angle": 0, + "content": "where \\( T_{e} = \\{T_{1}, T_{2}, \\ldots, T_{k}\\} \\) and each \\( T_{i} \\in \\mathcal{P}_{T} \\). The model \\( \\mathcal{M} \\) is tasked with either choosing relevant tasks from \\( T_{e} \\) or supplementing reasonable tasks to construct the appropriate task list \\( T_{l}^{*} \\), ensuring that all tasks in \\( T_{l}^{*} \\) are in line with the image content. After generating the \\( T_{l}^{*} \\), a sampling step is incorporated to guarantee task diversity. For each image, tasks are sampled. This sampling process is crucial as it enriches the variety of tasks associated with each image." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Image Source with QA Pairs. In the case of image datasets that have QA pairs (e.g., ALLaVA [3]), we adopt certain strategies for processing the original question annotations. We choose ALLaVA as the primary dataset for this type of image source due to its rich and diverse image content, which is accompanied by a variety of task types. First, we conduct an analysis of the original question annotations. We find that some of the questions are accompanied by some few-shot examples. Additionally, some questions in ALLaVA have options in their original annotations, which are not" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.243 + ], + "angle": 0, + "content": "suitable for our instruction-following task. 
Since we need to incorporate certain constraints into the original instructions in the subsequent steps, we use regular expressions and length limits to filter the questions in ALLaVA. Specifically, we select those questions that do not have few-shot examples associated with them. Mathematically, if we let \\( Q \\) be the set of all questions in ALLaVA, \\( Q_{fs} \\) be the subset of questions with few-shot examples, and \\( Q_{op} \\) be the subset of questions with options. We aim to find the subset \\( Q_{s} \\) of questions that satisfy the conditions:" + }, + { + "type": "equation", + "bbox": [ + 0.59, + 0.254, + 0.907, + 0.271 + ], + "angle": 0, + "content": "\\[\nQ _ {s} = \\left\\{q \\in Q | q \\notin Q _ {f s} \\wedge q \\notin Q _ {o p} \\right\\} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.281, + 0.907, + 0.341 + ], + "angle": 0, + "content": "where the filtering based on the absence of few-shot examples and options is achieved using regular expressions and length limits. Then, we get the expected \\( T^{*} \\) in our filter \\( Q_{s} \\) set for the images." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.351, + 0.731, + 0.367 + ], + "angle": 0, + "content": "3.3. Constraints Integration" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.373, + 0.907, + 0.552 + ], + "angle": 0, + "content": "Constraints Pool \\((\\mathcal{P}_C)\\) We use instruction to refer to the entire textual input, which in our paper can generally be viewed as a composition of a task instruction and multiple constraints instruction. Tasks and constraints are rich and diverse, with a certain complexity in our work. All the constraints in our work can be further classified into six major categories, each with its own unique characteristics and applications: Text Length Requirements, Mathematical Requirements, Language & Formatting Requirements, Rhetoric & Logic Requirements, Action Requirements, and Keyword Requirements. 
Please refer to the Appendix Fig. 5 for more details of all the constraints." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.555, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Given the constraints pool \\(\\mathcal{P}_C\\) and task instructions, a straightforward approach for composing full instruction is to first set several constraints for each constraint type and then randomly select one constraint from some of the types to compose the constraint list, and finally concatenate the constraint list with the task instruction to form the full instruction. But this direct method has two problems: (1) The constraints are not diverse enough, which may not be able to fully evaluate the ability of the model. (2) The contradiction between the constraints and also between the constraints and the task instruction may exist. For the first problem, an LLM is employed to generate concrete content of constraint instruction for the specific constraint type in our method. In order to avoid the generated content being too divergent or hard to control its difficulty, we carefully design some cases or requirements of details that needed to be paid attention to when generating the content for each constraint type (Appendix A.1.1). For the second problem, we also use a powerful LLM to help keep the correlation of constraints with its instruction and filter out those that cause total contradiction. Finally, we prompt an LLM to check whether the constraints and the task instruction are compatible and filter out those failing to pass the check. Our method not only" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.483, + 0.121 + ], + "angle": 0, + "content": "ensures the compatibility of constraints and instructions but also enriches the diversity of constraints." 
+ }, + { + "type": "text", + "bbox": [ + 0.093, + 0.123, + 0.484, + 0.424 + ], + "angle": 0, + "content": "In our actual practice process, we find that although we prompt the LLM to select appropriate constraints that should be compatible with the task instruction and other constraints, the generated constraints still have some contradiction with the task instruction, especially on those existing datasets with various kinds of annotations. The reason is that these datasets are designed for overall question-answering tasks, and the question(or named task instruction) tends to be contradictory with the constraints, which are mostly compatible with those tasks of creating or answering in non-short form. So, we decouple the selection and generation steps for this type of data source. Specifically, we first select the constraints from the constraints pool \\(\\mathcal{P}_C\\) and then provide the selected mostly compatible constraints to the LLM to select secondly and generate final constraints. But for image datasets without original QA pairs, in other words, for which we generate task instructions for them using \\(\\mathcal{P}_T\\), we directly sample k constraint types for the LLM to generate concrete content because they are mostly compatible with the pre-designed task instruction. The uniform process is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.435, + 0.483, + 0.454 + ], + "angle": 0, + "content": "\\[\nC _ {l} ^ {*} = \\mathcal {L} \\left(C _ {s}, T ^ {*}\\right), C _ {f} ^ {*} = \\mathcal {V} \\left(C _ {l} ^ {*}, T ^ {*}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.463, + 0.483, + 0.629 + ], + "angle": 0, + "content": "where \\(\\mathcal{T}^*\\) is the task applicable to the image. The model \\(\\mathcal{L}\\) is tasked with both choosing appropriate constraint types from \\(C_s\\) again and generating concrete constraints for some of them, whose output is a list of concrete constraint descriptions. 
To ensure that the generated constraints remain compatible with the given task instruction \\(T^*\\), we employ a final validation step using another LLM process, denoted as \\(\\mathcal{V}\\). This validation function checks whether each constraint in \\(C_l^*\\) aligns with \\(T^*\\) and filters out those that contradict or do not fit the task instruction. The resulting set of fully verified and compatible constraints is represented as \\(C_f^*\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.63, + 0.483, + 0.901 + ], + "angle": 0, + "content": "MM-IFInstruct-23k Construction. By applying the MM-IFEngine pipeline, we construct the MM-IFInstruct-23k dataset, which contains 23k high-quality multi-modal instruction-following training data. We first take an analysis of the performance of the current open-source MLLMs and proprietary MLLMs on several benchmarks [25, 34], and find that for instruction-following capability, the most powerful open-source MLLM like InternVL2.5-78B-MPO [42] is nearly equivalent to GPT-4o, and the performance on general VQA benchmarks are even higher than GPT-4o. Thus, we use InternVL2.5-78B-MPO to generate responses for our MM-IFInstruct-23k dataset. Despite its capabilities, the InternVL2.5-78B-MPO model encounters difficulties in ensuring \\(100\\%\\) compliance with our constraints, a challenge attributed to the complexity, number, and comprehensiveness. Consequently, we implement a post-processing stage to filter out responses that do not meet the specified criteria. Acknowledging that achieving perfect constraint adherence" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.908, + 0.198 + ], + "angle": 0, + "content": "might be challenging even for human annotators on this task, we set a practical accuracy threshold of \\(80\\%\\). 
Finally, our MM-IFInstruct-23k comprises 23k data items, with 16k constructed from the training set of CC3M, 6k from ALLaVA, and 4k from the training set of MultiUI, Geo170k[12] and ChartQA[31]. We show the distribution of constraints number of MM-IFInstruct-23k in Fig. 3." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.199, + 0.909, + 0.484 + ], + "angle": 0, + "content": "MM-IFDPO-23k Construction. To comprehensively explore and make full use of our high-quality data, we also utilize MM-IFEngine to construct MM-IFDPO-23k, a preference dataset comprising chosen and rejected samples suitable for Direct Preference Optimization (DPO) [36]. Our high-quality data can be directly employed as the chosen samples. Regarding rejected samples, we opt to utilize Qwen2-VL-7B-Instruct to answer the variant of the question for generating rejected pairs. Specifically, we have four distinct settings for generating negative pairs, which mainly differ in the input to Qwen2-VL-7B-Instruct. These settings include (1) With image, but randomly remove one-third of the number of constraints in the prompt; (2) With image, but randomly remove two-thirds of the number of constraints in the prompt; (3) With image, but randomly remove all the constraints in the prompt; and (4) Full prompt, but without the image; We use these four types of input to feed into Qwen2-VL-7B-Instruct model, and collect the rejected responses to construct the MM-IFDPO-23k." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.497, + 0.64, + 0.513 + ], + "angle": 0, + "content": "4. MM-IFEval" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.523, + 0.908, + 0.614 + ], + "angle": 0, + "content": "Existing benchmarks for multi-modal instruction following are scarce. The majority focus on simple and atomic instructions, resulting in performance saturation across models. 
To address this limitation, we introduce MM-IFEval, a human-annotated, comprehensive, and challenging benchmark designed for evaluating multi-modal IF." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.623, + 0.748, + 0.637 + ], + "angle": 0, + "content": "4.1. MM-IFEval Construction" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.645, + 0.909, + 0.84 + ], + "angle": 0, + "content": "To construct the MM-IFEval, we first use our MM-IFEngine to generate the question-answer (QA) pairs for images. The generated instructions may inherently contain potential conflicts. Consequently, human annotation remains critical for constructing this benchmark, as human annotators possess the cognitive capacity for comprehensive assessment of these complex situations. After the human annotation, we further use an extra post-processing step that prompts the LLMs to double-check and mitigate the occurrence of constraint conflicts as much as possible. Finally, we construct the MM-IFEval bench of 400 questions, 300 of which are compose-level open-ended questions and 100 perception-level questions with ground truth." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.909, + 0.901 + ], + "angle": 0, + "content": "Diverse Constraints. With 32 distinct constraint categories and an average of 5.1 constraints per question, MM-IFEval presents a more challenging evaluation task compared to earlier benchmarks (e.g., [34], which has 8 categories and 2.6" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.096, + 0.473, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.098, + 0.311, + 0.496, + 0.367 + ], + "angle": 0, + "content": "Figure 3. Constraint Quantity Distribution in MM-IFInstruct-23k. 
Our MM-IFInstruct-23k exhibits systematic variation in constraint complexity, with each sample containing 3-12 constraints per instruction." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.395, + 0.484, + 0.47 + ], + "angle": 0, + "content": "average constraints per question). Furthermore, our benchmark incorporates essential constraints such as \"Output in JSON format\", which is prevalent and practical in real-world scenarios, a feature not found in previous multi-modal instruction following benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.471, + 0.484, + 0.608 + ], + "angle": 0, + "content": "Compose-level and Perception-level Questions. compose-level questions involve textual constraints, while perception-level questions require greater visual perception ability to solve. The perception-level questions incorporate a variety of image sources, such as natural scenes, user interfaces, diagrams, table charts, and mathematical expressions, which we believe are representative of real-world applications. Please refer to the Appendix for examples of compose-level and perception-level questions." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.62, + 0.273, + 0.637 + ], + "angle": 0, + "content": "4.2. Hybrid Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.643, + 0.486, + 0.824 + ], + "angle": 0, + "content": "Current multi-modal instruction following benchmarks often rely solely on GPT-4o for evaluation. However, accurately assessing certain constraints, such as numerical conditions (e.g., 'output in 200 words', 'Answer in 5 paragraphs', 'Use the word 'cat' in the answer twice'), remains challenging even for GPT-4o. In contrast, verifiable functions like string matching offer greater precision than judge models for such constraints. To address this, we propose a hybrid evaluation strategy (see Fig. 2(c)) that employs three methods, including both rule-based Verification and judge models for more robust and precise evaluation." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.826, + 0.486, + 0.902 + ], + "angle": 0, + "content": "(1) Rule-based Verification. For constraints that adhere to a fixed format and involve specific content that can be objectively verified—yet remain challenging for an LLM to assess accurately—we employ a rule-based approach. Specifically, we design a set of predefined functions for different con" + }, + { + "type": "image", + "bbox": [ + 0.567, + 0.091, + 0.837, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.502, + 0.312, + 0.9, + 0.368 + ], + "angle": 0, + "content": "Figure 4. Constraint Category Distribution inCompose-Level Problems of MM-IFEval. This part comprises six primary constraint categories with 32 subcategories, forming a multi-level taxonomy for instruction-following evaluation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.394, + 0.907, + 0.485 + ], + "angle": 0, + "content": "strand types. The LLM is first prompted to extract the relevant parameters, denoted as Params, from the constraint description. When evaluating a constraint that falls within the scope of our rule-based framework, we use Params and the model's output as inputs to the predefined function to determine compliance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.485, + 0.909, + 0.65 + ], + "angle": 0, + "content": "(2) LLM-based Direct Judgment. This method is primarily used for evaluating constraints that can be easily and unambiguously verified based on the model's output. It is applicable to constraints where correctness is straightforward to determine, such as those requiring the inclusion of specific words or phrases. For instance, a constraint like \"Use the word 'inspiration' or its synonyms at least twice in the response\" does not follow a strict format and cannot be assessed using a rule-based approach. Instead, we directly leverage an LLM to determine whether the constraint is satisfied." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.652, + 0.909, + 0.803 + ], + "angle": 0, + "content": "(3) LLM-based Comparative Judgment. Some constraints, particularly those related to tone, style, or role-playing, are difficult to evaluate directly. To improve judgment accuracy, we adopt a comparative approach. Specifically, we generate a second model output using a nearly identical prompt but without the constraint under evaluation. The LLM-based evaluator is then provided with both outputs and asked to compare them, determining whether the model's response with the constraint in the prompt adheres more closely to the expected requirement." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.816, + 0.646, + 0.832 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.908, + 0.902 + ], + "angle": 0, + "content": "Benchmarks. We select the following benchmarks to demonstrate that models fine-tuned on MM-IFInstruct-23k and MM-IFDPO-23k enhance instruction following without compromising performance on other VQA tasks: (1)" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.089, + 0.907, + 0.146 + ], + "angle": 0, + "content": "Table 1. Main results on Instruction Following benchmarks, including our proposed MM-IFEval, MIA-Bench [34], and IFEval [57]. The symbol \\( {}^{\\mathrm{M}} \\) refers to multimodal benchmarks,and \\( {}^{\\mathrm{T}} \\) denotes text-only benchmarks. We report both compose-level (\"C\") and perception-level (\"P\") for MM-IFEval,prompt-level accuracy (\"Prompt.\")andInst-level accuracy (\"Inst.\")for IFEval,and the averaged results across all three benchmarks in the rightmost column." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.15, + 0.93, + 0.32 + ], + "angle": 0, + "content": "
ModelParameterMM-IFEvalM(ours)MIA MIFTAvg.
CPAvg.Prompt.Inst.Avg.
LLaVA-NeXT-7B [21]7B36.816.031.673.232.043.337.747.5
LLaVA-OneVision-Qwen2-7B-OV [16]8B37.424.034.084.543.354.849.055.8
InternVL2-8B [7]8B45.232.041.986.244.657.050.859.6
InternVL2.5-8B [6]8B49.636.046.288.552.262.457.364.0
LLaVA-NeXT-Llama3-8B [21]8B45.921.039.783.345.056.450.757.9
w. MM-IFInstruct-23k-59.319.049.2 +9.586.5 +3.250.861.856.3 +5.664.0 +6.1
w. MM-IFDPO-23k-58.721.049.3 +9.690.0 +6.764.573.769.1 +18.469.5 +11.6
Qwen2-VL-7B-Instruct [41]8B42.740.042.080.542.452.547.456.6
w. MM-IFInstruct-23k-57.038.052.3 +10.387.7 +7.246.858.452.6 +5.264.2 +7.6
w. MM-IFDPO-23k-55.243.052.2 +10.288.1 +7.655.264.359.7 +12.366.7 +10.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.333, + 0.907, + 0.375 + ], + "angle": 0, + "content": "Table 2. Main results on VQA benchmarks, including general knowledge (MMMU [50], MMBench [24], MMStar [5], MMT-Bench [48]), document understanding (AI2D [15], OCRBench [25]), Chat (MMVet [49]) and Hallusion (POPE [19]). Fine-tuning models on MM-IFDPO-23k achieve comparable performance across these benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.379, + 0.921, + 0.496 + ], + "angle": 0, + "content": "
ModelGeneralDocumentChatHallusion
MMMUvalMMBenchdevMMStarMMT-BenchvalAI2DOCRBenchMM VetPOPEAvg.
LLaVA-NeXT-Llama3-8B [21]43.772.543.653.173.155.043.387.258.9
w. MM-IFInstruct-23k45.869.344.253.371.255.346.388.859.3
w. MM-IFDPO-23k44.172.143.753.172.356.743.986.859.1
Qwen2-VL-7B-Instruct [41]53.981.060.863.282.986.763.386.372.3
w. MM-IFInstruct-23k54.079.357.161.081.681.861.689.270.7
w. MM-IFDPO-23k54.081.358.563.783.386.866.185.772.4
" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.521, + 0.482, + 0.626 + ], + "angle": 0, + "content": "Instruction Following benchmarks, including MIA-Bench [34], IFEval [57], and our proposed MM-IFEval. To be noted, IFEval is a language-only benchmark while others are both multi-modal benchmarks. (2) VQA Benchmarks, including MMMU [50], MMBench [24], MMStar [5], AI2D [15], OCRBench [25], MMVet [49], POPE [19] and MMT-Bench [48]." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.627, + 0.483, + 0.779 + ], + "angle": 0, + "content": "Implementation Details. We conducted SFT and DPO fine-tuning experiments on two representative MLLMs: Qwen2-VL-7B-Instruct [41] and LLaVA-Next-Llama3-8B [21], using our custom datasets MM-IFInstruct-23k for supervised fine-tuning (SFT) and MM-IFDPO-23k for direct preference optimization (DPO). For the SFT phase, we used a batch size of 128 and a learning rate of 1e-5. For the DPO phase, we used a learning rate of 5e-7 with the batch size of 16. We implemented our training pipeline with the help of LLaMAFactory and evaluation pipeline under VLMEvalkit [10]." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.788, + 0.483, + 0.818 + ], + "angle": 0, + "content": "5.1. Results about MM-IFInstruct-23k and MM-IFDPO-23k" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.826, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Consistently Improvements on Instruction Following Benchmarks. As shown in Tab. 1, both MM-IFInstruct-23k and MM-IFDPO-23k significantly enhance the model's performance in instruction following benchmarks. Finetuning LLaVA-Next and Qwen2-VL on MM-IFInstruct-23k" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.521, + 0.907, + 0.627 + ], + "angle": 0, + "content": "yielded significant averaging performance gains of \\(6.1\\%\\) and \\(7.6\\%\\) points, respectively. 
Furthermore, applying DPO with MM-IFDPO-23k also led to notable improvements for LLaVA-Next and Qwen2-VL, with average gains of \\(11.6\\%\\) and \\(10.1\\%\\) points. Such improvements demonstrate the effectiveness of MM-IFEngine in constructing high-quality training data." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.643, + 0.909, + 0.763 + ], + "angle": 0, + "content": "Comparable Results on VQA Benchmarks. To show that fine-tuning on MM-IFInstruct-23k and MM-IFDPO-23k improves instruction following without degrading performance on other VQA tasks, we analyzed model performance on other widely used benchmarks, as detailed in Tab. 2. Results indicate that models fine-tuning with MM-IFInstruct-23k and MM-IFDPO-23k demonstrate comparable performance across these benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.909, + 0.901 + ], + "angle": 0, + "content": "SFT vs DPO. As evidenced by Tab. 1 and Tab. 2, DPO using MM-IFDPO-23k significantly surpasses SFT on MM-IFInstruct-23k. This is likely due to negative samples of DPO, which are essential for training models to respect constraints, particularly in our data with multiple and diverse constraints. Additionally, the Kullback-Leibler (KL) divergence in DPO preserves the model's generalization, as demonstrated in Tab. 2." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.089, + 0.486, + 0.16 + ], + "angle": 0, + "content": "Table 3. Evaluation of various MLLMs on MM-IFEval. We report the accuracy of easy and difficult problems and the average accuracy across all problems. The C-Level and P-Level refer to the compose-level and perception-level problems, respectively. The best performance in each section is highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.092, + 0.163, + 0.485, + 0.4 + ], + "angle": 0, + "content": "
ModelParamC-LevelP-LevelAvg.
Proprietary MLLMs
Claude-3.5V-Sonnet [1]-67.544.061.7
GPT-4o-mini [13]-70.440.062.8
GPT-4o (20240806) [13]-71.544.064.6
Open-Source MLLMs
LLaVA-NeXT-7B [21]7B36.816.031.6
LLaVA-OneVision-Qwen2-7b-OV [16]8B37.424.034.0
MiniCPM-V-2.6 [47]8B39.232.037.4
InternVL2-8B [7]8B45.232.041.9
InternVL2-40B [7]40B48.036.045.0
InternVL2.5-8B [6]8B49.636.046.2
InternVL2.5-26B [6]8B53.532.048.1
Qwen2-VL-72B-Instruct [41]72B53.443.050.8
LLaVA-NeXT-Llama3-8B [21]8B45.921.039.7
+ MM-IFDPO-23k-58.721.049.3
Qwen2-VL-7B-Instruct [41]8B42.740.042.0
+ MM-IFDPO-23k-55.243.052.2
" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.425, + 0.345, + 0.44 + ], + "angle": 0, + "content": "5.2. Leaderboard of MM-IFEval" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.447, + 0.483, + 0.522 + ], + "angle": 0, + "content": "We present the performance comparison results of various MLLMs on our MM-IFEval in Tab. 3, including both proprietary MLLMs such as GPT-4o [13] and Claude-3.5 [1] and open-source MLLMs such as LLaVA-Next [21], LLaVA-OneVision [16], InternVL [6, 7], and Qwen2-VL [41]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.522, + 0.483, + 0.779 + ], + "angle": 0, + "content": "MM-IFEval is Challenging. Results on Tab. 3 demonstrate that multimodal instruction following is still a challenging and unsolved task for current MLLMs, specifically for the perception-level problems. The propriety models GPT-4o and Claude-3.5V-Sonnet establish top-tier average performance with scores of 64.6 and 61.7, respectively. The leading open-source MLLM, Qwen2-VL-72B merely achieves an overall accuracy of 50.8. We attribute the performance gap between proprietary and open-source models to the scarcity of high-quality open-source training data for instruction following. As a result of our MM-IFDPO-23k, Qwen2-VL-7B fine-tuned via our optimized DPO approach achieves a score of 52.2, demonstrating a \\(24.3\\%\\) relative improvement over its baseline (42.0), and even surpasses the larger Qwen2VL-72B model. We hope our MM-IFEval benchmark motivates further exploration into improving MLLM instruction-following." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.779, + 0.484, + 0.84 + ], + "angle": 0, + "content": "Benchmark Examples. Please refer to the Appendix for visual examples of MM-IFEval, including images and instructions with constraints for both compose-level and perception-level problems." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.849, + 0.255, + 0.863 + ], + "angle": 0, + "content": "5.3. 
Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Ablation Studies on Different DPO Settings. In Tab. 4, we present an ablation study on various strategies for con" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.089, + 0.907, + 0.158 + ], + "angle": 0, + "content": "Table 4. Ablation studies across different DPO settings, including randomly deleting constraints (second row to fourth row) or prompting MLLMs without images (bottom row) to generate negative responses. Avg. refers to the average score of three IF benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.163, + 0.905, + 0.323 + ], + "angle": 0, + "content": "
ModelMM-IFEvalMIAIFEvalAvg.
Qwen2-VL-7B-Instruct42.080.547.456.6
+ DPO (-33% cons)51.588.257.965.8
+ DPO (-66% cons)51.288.058.465.9
+ DPO (-100% cons)52.288.159.766.7
+ DPO (w/o img)48.486.954.763.4
LLaVA-NeXT-Llama3-8B39.783.350.757.9
+ DPO (-33% cons)50.487.264.367.3
+ DPO (-66% cons)48.786.869.768.4
+ DPO (-100% cons)49.390.069.169.5
+ DPO (w/o img)44.785.964.865.2
" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.343, + 0.907, + 0.435 + ], + "angle": 0, + "content": "structuring pairwise preference data for Direct Preference Optimization (DPO). These strategies primarily include: (1) generating rejected responses by randomly removing constraints from the instruction (second to fourth rows), and (2) prompting MLLMs without providing image inputs to generate rejected responses (bottom row)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.435, + 0.909, + 0.646 + ], + "angle": 0, + "content": "We conduct experiments on both the Qwen2-VL-7B-Instruct and LLaVA-NeXT-Llama3-8B models. As shown in Tab. 4, all DPO variants exhibit strong robustness, consistently outperforming the baseline. Among the four evaluated strategies, removing \\(100\\%\\) of the constraints to generate rejected responses achieves the best performance, whereas omitting image inputs yields the weakest performance. Furthermore, we observe a consistent trend: as the proportion of removed constraints increases from \\(33\\%\\) to \\(100\\%\\), the performance of the resulting DPO models improves accordingly. This suggests that removing more constraints amplifies the semantic gap between preferred and rejected responses, thereby enhancing the effectiveness of contrastive learning during DPO training." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.647, + 0.908, + 0.692 + ], + "angle": 0, + "content": "Based on these findings, we adopt the \\(100\\%\\) -constraint removal strategy as the default approach for constructing the DPO data in MM-IFDPO-23k." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.709, + 0.634, + 0.724 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.909, + 0.903 + ], + "angle": 0, + "content": "This paper contributes to the field of multimodal instruction-following by exploring pipelines for training data collection and proposing a challenging benchmark. 
We present MM-IFEngine, a pipeline designed to generate image-instruction pairs, subsequently used to construct MM-IFInstruct-23k for SFT and MM-IFDPO-23k for DPO. We also analyze the limitations of existing multimodal instruction following benchmarks and propose MM-IFEval, a benchmark featuring diverse instruction types and a hybrid evaluation strategy that combines rule-based methods with an LLM-based judge. We hope this work inspires further research into improving the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.486, + 0.138 + ], + "angle": 0, + "content": "instruction-following ability of Multimodal Large Language Models, a critical step towards realizing their potential in diverse and impactful applications." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.152, + 0.188, + 0.168 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.178, + 0.357, + 0.191 + ], + "angle": 0, + "content": "[1] Anthropic. Claude 3.5 sonnet. 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.193, + 0.484, + 0.262 + ], + "angle": 0, + "content": "[2] Yonatan Bitton, Hritik Bansal, Jack Hessel, Rulin Shao, Wanrong Zhu, Anas Awadalla, Josh Gardner, Rohan Taori, and Ludwig Schmidt. VisIT-Bench: A benchmark for vision-language instruction following inspired by real-world use. In NeurIPS, Datasets and Benchmarks, 2023. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.264, + 0.484, + 0.332 + ], + "angle": 0, + "content": "[3] Guiming Hardy Chen, Shunian Chen, Ruifei Zhang, Junying Chen, Xiangbo Wu, Zhiyi Zhang, Zhihong Chen, Jianquan Li, Xiang Wan, and Benyou Wang. Allava: Harnessing gpt4v-synthesized data for lite vision-language models. arXiv preprint arXiv:2402.11684, 2024. 
3, 4, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.334, + 0.484, + 0.389 + ], + "angle": 0, + "content": "[4] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.392, + 0.484, + 0.447 + ], + "angle": 0, + "content": "[5] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? In NeurIPS, 2024. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.45, + 0.484, + 0.531 + ], + "angle": 0, + "content": "[6] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.534, + 0.484, + 0.603 + ], + "angle": 0, + "content": "[7] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.605, + 0.484, + 0.66 + ], + "angle": 0, + "content": "[8] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. Instructlip: Towards general-purpose vision-language models with instruction tuning, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.662, + 0.484, + 0.745 + ], + "angle": 0, + "content": "[9] Biplab Deka, Zifeng Huang, Chad Franzen, Joshua Hibschman, Daniel Afergan, Yang Li, Jeffrey Nichols, and Ranjitha Kumar. 
Rico: A mobile app dataset for building data-driven design applications. In Proceedings of the 30th annual ACM symposium on user interface software and technology, pages 845-854, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.748, + 0.484, + 0.829 + ], + "angle": 0, + "content": "[10] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmealkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM international conference on multimedia, pages 11198-11201, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.832, + 0.484, + 0.9 + ], + "angle": 0, + "content": "[11] Xinyu Fang, Zhijian Chen, Kai Lan, Shengyuan Ding, Yingji Liang, Xiangyu Zhao, Farong Wen, Zicheng Zhang, Guofeng Zhang, Haodong Duan, et al. Creation-mmbench: Assessing context-aware creative intelligence in mllm. arXiv preprint arXiv:2503.14478, 2025. 3" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.178, + 0.484, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.161 + ], + "angle": 0, + "content": "[12] Jiahui Gao, Renjie Pi, Jipeng Zhang, Jiacheng Ye, Wanjun Zhong, Yufei Wang, Lanqing Hong, Jianhua Han, Hang Xu, Zhenguo Li, et al. G-llava: Solving geometric problem with multi-modal large language model. arXiv preprint arXiv:2312.11370, 2023.5.2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.163, + 0.907, + 0.218 + ], + "angle": 0, + "content": "[13] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. GPT-4o system card. arXiv preprint arXiv:2410.21276, 2024. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.219, + 0.907, + 0.286 + ], + "angle": 0, + "content": "[14] Yuxin Jiang, Yufei Wang, Xingshan Zeng, Wanjun Zhong, Liangyou Li, Fei Mi, Lifeng Shang, Xin Jiang, Qun Liu, and Wei Wang. Followbench: A multi-level fine-grained constraints following benchmark for large language models. In ACL, 2024. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.288, + 0.906, + 0.328 + ], + "angle": 0, + "content": "[15] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In ECCV, 2016. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.33, + 0.907, + 0.385 + ], + "angle": 0, + "content": "[16] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Peiyuan Zhang, Yanwei Li, Ziwei Liu, et al. LLaVA-OneVision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.386, + 0.906, + 0.441 + ], + "angle": 0, + "content": "[17] Huayang Li, Siheng Li, Deng Cai, Longyue Wang, Lemao Liu, Taro Watanabe, Yujiu Yang, and Shuming Shi. TextBind: Multi-turn interleaved multimodal instruction-following in the wild. In ACL Findings, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.442, + 0.907, + 0.496 + ], + "angle": 0, + "content": "[18] Jian Li, Weiheng Lu, Hao Fei, Meng Luo, Ming Dai, Min Xia, Yizhang Jin, Zhenye Gan, Ding Qi, Chaoyou Fu, et al. A survey on benchmarks of multimodal large language models. arXiv preprint arXiv:2408.08632, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.498, + 0.906, + 0.538 + ], + "angle": 0, + "content": "[19] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models, 2023. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.54, + 0.907, + 0.579 + ], + "angle": 0, + "content": "[20] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv preprint arXiv:2304.08485, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.581, + 0.907, + 0.622 + ], + "angle": 0, + "content": "[21] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR,and world knowledge,2024.7,8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.624, + 0.907, + 0.678 + ], + "angle": 0, + "content": "[22] Junpeng Liu, Tianyue Ou, Yifan Song, Yuxiao Qu, Wai Lam, Chenyan Xiong, Wenhu Chen, Graham Neubig, and Xiang Yue. Harnessing webpage uis for text-rich visual understanding, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.679, + 0.907, + 0.748 + ], + "angle": 0, + "content": "[23] Yangzhou Liu, Yue Cao, Zhangwei Gao, Weiyun Wang, Zhe Chen, Wenhai Wang, Hao Tian, Lewei Lu, Xizhou Zhu, Tong Lu, Yu Qiao, and Jifeng Dai. Mminstruct: a high-quality multi-modal instruction tuning dataset with extensive diversity. Science China Information Sciences, 67(12), 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.749, + 0.906, + 0.803 + ], + "angle": 0, + "content": "[24] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. MMBench: Is your multi-modal model an all-around player? In ECCV, 2024. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.804, + 0.906, + 0.872 + ], + "angle": 0, + "content": "[25] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. OCRBench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 2024. 
3, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.873, + 0.906, + 0.902 + ], + "angle": 0, + "content": "[26] Ziyu Liu, Tao Chu, Yuhang Zang, Xilin Wei, Xiaoyi Dong, Pan Zhang, Zijian Liang, Yuanjun Xiong, Yu Qiao, Dahua" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.092, + 0.484, + 0.134 + ], + "angle": 0, + "content": "Lin, et al. MMDU: A multi-turn multi-image dialog understanding benchmark and instruction-tuning dataset for lvlms. In NeurIPS Datasets and Benchmarks Track, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.135, + 0.484, + 0.175 + ], + "angle": 0, + "content": "[27] Renze Lou, Kai Zhang, and Wenpeng Yin. A comprehensive survey on instruction following. arXiv preprint arXiv:2303.10475, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.177, + 0.484, + 0.232 + ], + "angle": 0, + "content": "[28] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.233, + 0.484, + 0.3 + ], + "angle": 0, + "content": "[29] Ziyang Luo, Can Xu, Pu Zhao, Qingfeng Sun, Xiubo Geng, Wenxiang Hu, Chongyang Tao, Jing Ma, Qingwei Lin, and Daxin Jiang. Wizardcoder: Empowering code large language models with evol-instruct. arXiv preprint arXiv:2306.08568, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.302, + 0.484, + 0.37 + ], + "angle": 0, + "content": "[30] Yubo Ma, Yuhang Zang, Liangyu Chen, Meiqi Chen, Yizhu Jiao, Xinze Li, Xinyuan Lu, Ziyu Liu, Yan Ma, Xiaoyi Dong, et al. MMLongBench-Doc: Benchmarking long-context document understanding with visualizations. 
In NeurlPS Datasets and Benchmarks Track, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.372, + 0.484, + 0.426 + ], + "angle": 0, + "content": "[31] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 5, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.428, + 0.484, + 0.453 + ], + "angle": 0, + "content": "[32] OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023. Accessed: 2025-02-23. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.455, + 0.484, + 0.481 + ], + "angle": 0, + "content": "[33] OpenAI. GPT-4V(ison) System Card. 2023. Accessed: 2025-02-23. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.484, + 0.484, + 0.537 + ], + "angle": 0, + "content": "[34] Yusu Qian, Hanrong Ye, Jean-Philippe Fauconnier, Peter Grasch, Yinfei Yang, and Zhe Gan. MIA-Bench: Towards better instruction following evaluation of multimodal llms. In ICLR, 2025. 1, 2, 3, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.539, + 0.484, + 0.607 + ], + "angle": 0, + "content": "[35] Yiwei Qin, Kaiqiang Song, Yebowen Hu, Wenlin Yao, Sangwoo Cho, Xiaoyang Wang, Xuansheng Wu, Fei Liu, Pengfei Liu, and Dong Yu. InFoBench: Evaluating instruction following ability in large language models. arXiv preprint arXiv:2401.03601, 2024. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.609, + 0.484, + 0.676 + ], + "angle": 0, + "content": "[36] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 
2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.679, + 0.484, + 0.76 + ], + "angle": 0, + "content": "[37] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, 2018. 4, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.762, + 0.484, + 0.83 + ], + "angle": 0, + "content": "[38] Lucy Xiaoyang Shi, Brian Ichter, Michael Equi, Liyiming Ke, Karl Pertsch, Quan Vuong, James Tanner, Anna Walling, Haohuan Wang, Niccolo Fusai, et al. Hi Robot: Open-ended instruction following with hierarchical vision-language-action models. arXiv preprint arXiv:2502.19417, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.832, + 0.484, + 0.872 + ], + "angle": 0, + "content": "[39] Dingjie Song, Shunian Chen, Guiming Hardy Chen, Fei Yu, Xiang Wan, and Benyou Wang. Milebench: Benchmarking mllms in long context, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.874, + 0.484, + 0.901 + ], + "angle": 0, + "content": "[40] Fei Wang, Xingyu Fu, James Y. Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou," + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.092, + 0.908, + 0.162 + ], + "angle": 0, + "content": "Kai Zhang, Tianyi Lorena Yan, Wenjie Jacky Mo, Hsiang-Hui Liu, Pan Lu, Chunyuan Li, Chaowei Xiao, Kai-Wei Chang, Dan Roth, Sheng Zhang, Hoifung Poon, and Muhao Chen. Muirbench: A comprehensive benchmark for robust multi-image understanding, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.164, + 0.907, + 0.232 + ], + "angle": 0, + "content": "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-VL: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.234, + 0.907, + 0.303 + ], + "angle": 0, + "content": "[42] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, et al. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.305, + 0.907, + 0.373 + ], + "angle": 0, + "content": "[43] Xilin Wei, Xiaoran Liu, Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Jian Tong, Haodong Duan, Qipeng Guo, Jiaqi Wang, et al. Videorope: What makes for good video rotary position embedding? arXiv preprint arXiv:2502.05173, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.376, + 0.907, + 0.431 + ], + "angle": 0, + "content": "[44] Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, and Daxin Jiang. Wizardlm: Empowering large language models to follow complex instructions. arXiv preprint arXiv:2304.12244, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.433, + 0.907, + 0.472 + ], + "angle": 0, + "content": "[45] Zhiyang Xu, Ying Shen, and Lifu Huang. Multiinstruct: Improving multi-modal zero-shot learning via instruction tuning, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.476, + 0.907, + 0.53 + ], + "angle": 0, + "content": "[46] Zhiyang Xu, Chao Feng, Rulin Shao, Trevor Ashby, Ying Shen, Di Jin, Yu Cheng, Qifan Wang, and Lifu Huang. Visionplan: Scaling human-labeled tasks in visual instruction tuning, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.533, + 0.907, + 0.588 + ], + "angle": 0, + "content": "[47] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. MiniCPM-V: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.59, + 0.907, + 0.687 + ], + "angle": 0, + "content": "[48] Kaining Ying, Fanqing Meng, Jin Wang, Zhiqian Li, Han Lin, Yue Yang, Hao Zhang, Wenbo Zhang, Yuqi Lin, Shuo Liu, Jiayi Lei, Quanfeng Lu, Runjian Chen, Peng Xu, Renrui Zhang, Haozhe Zhang, Peng Gao, Yali Wang, Yu Qiao, Ping Luo, Kaipeng Zhang, and Wenqi Shao. Mmt-bench: A comprehensive multimodal benchmark for evaluating large vision-language models towards multitask agi, 2024. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.689, + 0.907, + 0.744 + ], + "angle": 0, + "content": "[49] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. MM-Vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.746, + 0.907, + 0.814 + ], + "angle": 0, + "content": "[50] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expertagi. In CVPR, 2024.3,7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.817, + 0.907, + 0.884 + ], + "angle": 0, + "content": "[51] Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Ziyu Liu, Shengyuan Ding, Shenxi Wu, Yubo Ma, Haodong Duan, Wenwei Zhang, et al. Internlm-xcomposer2. 5-reward: A simple yet effective multi-modal reward model. arXiv preprint arXiv:2501.12368, 2025. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.887, + 0.907, + 0.901 + ], + "angle": 0, + "content": "[52] Yuhang Zang, Wei Li, Jun Han, Kaiyang Zhou, and" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.908, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.092, + 0.484, + 0.12 + ], + "angle": 0, + "content": "Chen Change Loy. Contextual object detection with multimodal large language models. IJCV, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.484, + 0.19 + ], + "angle": 0, + "content": "[53] Tao Zhang, Yanjun Shen, Wenjing Luo, Yan Zhang, Hao Liang, Fan Yang, Mingan Lin, Yujing Qiao, Weipeng Chen, Bin Cui, et al. CFBench: A comprehensive constraints-following benchmark for llms. arXiv preprint arXiv:2408.01122, 2024. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.192, + 0.483, + 0.234 + ], + "angle": 0, + "content": "[54] Xinghua Zhang, Haiyang Yu, Cheng Fu, Fei Huang, and Yongbin Li. Iopo: Empowering llms with complex instruction following via input-output preference optimization, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.235, + 0.484, + 0.303 + ], + "angle": 0, + "content": "[55] Xiangyu Zhao, Shengyuan Ding, Zicheng Zhang, Haian Huang, Maosong Cao, Weiyun Wang, Jiaqi Wang, Xinyu Fang, Wenhai Wang, Guangtao Zhai, et al. Omnialign-v: Towards enhanced alignment of mllms with human preference. arXiv preprint arXiv:2502.18411, 2025. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.305, + 0.483, + 0.374 + ], + "angle": 0, + "content": "[56] Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. In NeurIPS Datasets and Benchmarks Track, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.375, + 0.484, + 0.432 + ], + "angle": 0, + "content": "[57] Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. Instruction-following evaluation for large language models. arXiv preprint arXiv:2311.07911, 2023. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.433, + 0.484, + 0.486 + ], + "angle": 0, + "content": "[58] Wangchunshu Zhou, Yuchen Eleanor Jiang, Ethan Wilcox, Ryan Cotterell, and Mrinmaya Sachan. Controlled text generation with natural language instructions. In ICML, 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.484, + 0.486 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.2, + 0.086, + 0.8, + 0.14 + ], + "angle": 0, + "content": "MM-IFEngine: Towards Multimodal Instruction Following Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.156, + 0.221, + 0.172 + ], + "angle": 0, + "content": "A. MM-IFEval" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.183, + 0.471, + 0.199 + ], + "angle": 0, + "content": "A.1. An overview of Constraints and Instructions" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.207, + 0.222, + 0.221 + ], + "angle": 0, + "content": "A.1.1. Constraints" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.227, + 0.482, + 0.318 + ], + "angle": 0, + "content": "Based on daily use cases and existing research, we have identified six main categories of constraints, which can be further divided into 32 specific constraint types shown in Fig. 5. In this section, we introduce and exemplify these six major constraint categories. For detailed descriptions and examples of all 32 subcategories, please refer to Table 5." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.32, + 0.483, + 0.516 + ], + "angle": 0, + "content": "Text Length Requirements. In this category, we focus on the length of the response, including the number of paragraphs, sentences, and words. We also consider the length of the response in the aspect of poetry or \"Use yes or no to answer the question\". It must be noted that we do not require the model to follow the strict requirement in exact numbers like \"The response must be exactly 56 words\". The constraints we propose in this category are based on reality, with precise numerical requirements only at the sentence or paragraph level, and of moderate size; the rest of the constraints are used to limit by ranges like \"The response must be between 100 and 150 words\", which aligns with the task that people tend to encounter in real-world scenarios." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.518, + 0.483, + 0.638 + ], + "angle": 0, + "content": "Mathematical Requirements. This category includes constraints related to the most common part of answering mathematical problems like precision, scientific notation, and other mathematical requirements. For example, \"Keep two decimal places for the number in the answer\", \"Please round up all the numbers in the answer\", or \"Don't include specific numbers in your answers. Compare numbers with their relative sizes\"." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.641, + 0.483, + 0.746 + ], + "angle": 0, + "content": "Language & Formatting Requirements. This category includes constraints related to the language and formatting of the response, such as answering in a specific language, using a specific format like JSON, or using a specific style like poetry. Requirements for tense, writing style, numbering, list, and other language-related or formatting-related aspects are also included in this category." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.748, + 0.483, + 0.853 + ], + "angle": 0, + "content": "Rhetoric & Logic Requirements. \"Rhetoric\" refers to the art of using language to persuade or influence, while \"Logic\" refers to the principles of reasoning and argumentation. This category includes constraints related to the rhetoric and logic of the response, such as the use of metaphor, simple, cause-and-effect relationship, conditional statement, and other rhetoric and logic-related aspects." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Action Requirements. \"Action\" refers to the action that the model should take like a human. We define this category as the constraints that require the model to perform a specific" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.158, + 0.905, + 0.233 + ], + "angle": 0, + "content": "action, such as tone, role imitation, use specific prefix or suffix, or acting like under some specific situation. We hope this category can help us to evaluate the ability of the model to follow instructions and perform actions in more complex and realistic scenarios." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.235, + 0.907, + 0.356 + ], + "angle": 0, + "content": "Keyword Requirements. \"Keyword\" refers to the specific words or phrases that the model should include or avoid in the response. This category includes constraints related to the response keyword, such as the use of specific keywords, the avoidance of specific keywords, or the variation of specific keywords. For example, \"Use at least three synonyms for 'innovation,' such as 'breakthrough,' 'new approach,' or 'invention,' spread throughout your text.\"" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.37, + 0.686, + 0.383 + ], + "angle": 0, + "content": "A.1.2. 
Instruction Tasks" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.39, + 0.906, + 0.51 + ], + "angle": 0, + "content": "For source datasets lacking original task instructions, we constructed a diverse task pool containing 18 instructions that encourage open-ended responses from models. These instructions can be categorized into five task types: Descriptive Analysis, Emotional & Perspective, Creative Writing, Social Media & Content, and Roleplay. The classification information and examples of the instructions are shown in Table 6." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.526, + 0.753, + 0.541 + ], + "angle": 0, + "content": "A.2. Perception-level Problems" + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.567, + 0.787, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.698, + 0.907, + 0.768 + ], + "angle": 0, + "content": "Figure 6. Image Source Distribution in perception-level problems.Perception-level problems in MM-IFEval presents a systematic categorization of 100 challenging vision-based instructionfollowing tasks, organized into 13 distinct classes according to image content characteristics and task complexity." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.909, + 0.901 + ], + "angle": 0, + "content": "Perception-level problems in MM-IFEval comprise 100 carefully crafted questions with strong image-constraint correlations. The images can be categorized into 13 information-rich and complex domains shown in Figure 6. Figures 10, 11, 12, and 13 present representative examples from the web interface, diagram, poster, and visual difference categories, respectively, demonstrating the diverse visual challenges incorporated in our benchmark." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.503, + 0.935 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.089, + 0.825, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.273, + 0.908, + 0.304 + ], + "angle": 0, + "content": "Figure 5. Demonstration of constraints categories. We designed 6 main categories for all the constraints used, with a total of 32 subcategories" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.32, + 0.245, + 0.337 + ], + "angle": 0, + "content": "B. Image Sources" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.345, + 0.484, + 0.406 + ], + "angle": 0, + "content": "The quality of the image source is crucial for the performance of the model. Except of this, the diversity of the image source is also important to fully utilize or evaluate the ability of the model. We use the following image source:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.407, + 0.484, + 0.481 + ], + "angle": 0, + "content": "- Natural Scene: The natural scene is the most common image source, which is most used in the real-world like the image of a beautiful landscape, a busy street, or a crowded cafe. In this part, we sample images from CC3M[37] and ALLaVA[3]." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.482, + 0.484, + 0.587 + ], + "angle": 0, + "content": "- UI Interface: The UI interface is the image from the UI interface of the website and mobile application. It is crucial because it represents a significant portion of real-world multimodal interactions where users need to understand and interact with digital interfaces. We collected diverse mobile app UI images from the RICO[9] dataset and web UI images from the MultiUI[22] dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.588, + 0.484, + 0.663 + ], + "angle": 0, + "content": "- Diagram & Chart: The diagram and chart are the image that contains some specific information like the data, the relationship between the data, or the change of the data. We collect diagram and chart images from ChartQA[31] dataset, which contains diverse diagram and chart images." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.664, + 0.484, + 0.755 + ], + "angle": 0, + "content": "- **Mathematic:** The math problem is the image that contains a math problem, which is a common task in the real-world like the problem of the math, the solution of the math problem, or the calculation of the math problem. We collect math problem images from Geo170k[12] dataset, which contains diverse geometry problem images." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.407, + 0.484, + 0.755 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.769, + 0.395, + 0.788 + ], + "angle": 0, + "content": "C. MM-IFEngine Prompt Template" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.795, + 0.484, + 0.902 + ], + "angle": 0, + "content": "MM-IFEngine provides a scalable pipeline for mass-producing instruction-following datasets for multimodal large language models, functioning effectively regardless of whether source datasets contain original instructions. This engine enables systematic augmentation of existing visual datasets with diverse instruction-following tasks. Figures 14 and 15 demonstrate representative prompt templates from" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.321, + 0.908, + 0.382 + ], + "angle": 0, + "content": "MM-IFEngine's two core components: the instruction generation module and the constraint integration module, respectively, illustrating the methodology behind our automated data construction process." 
+ }, + { + "type": "title", + "bbox": [ + 0.513, + 0.404, + 0.887, + 0.421 + ], + "angle": 0, + "content": "D. MM-IFInstruct and MM-IFDPO Dataset" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.433, + 0.909, + 0.613 + ], + "angle": 0, + "content": "Our MM-IFInstruct dataset integrates three distinct data sources: CC3M (without original instructions), ALLaVA (with pre-existing questions), and a diversity collection composed of MultiUI, ChartQA, and Geo170k. To create the MM-IFDPO dataset for preference optimization, we randomly removed \\(33\\%\\) of constraints from the MM-IFInstruct samples to generate rejected examples. Figures 16, 17, and 18 illustrate representative samples derived from CC3M, ALLaVA, and our diversity collection, respectively, while Figure 19 demonstrates an example pair from the MM-IFDPO dataset showing both preferred and rejected instructions." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.637, + 0.634, + 0.652 + ], + "angle": 0, + "content": "E. Evaluation" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.664, + 0.64, + 0.679 + ], + "angle": 0, + "content": "E.1. Rule-based" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.689, + 0.909, + 0.901 + ], + "angle": 0, + "content": "We identified 10 constraint subcategories from our taxonomy of 32 that could be algorithmically verified. For these selected constraints, we developed specialized verification functions with targeted parameters. For efficiency, we employed large language models to analyze each constraint specification, select the most appropriate verification function, and extract the necessary parameters. All selections were subsequently validated through manual review to ensure the accuracy and quality of both the function selection and their parameters. 
The prompt template used for function selection and parameter extraction is illustrated in Figure 20, while Table 7 provides a comprehensive overview of all verification functions with their corresponding parameter examples." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.095, + 0.091, + 0.315, + 0.108 + ], + "angle": 0, + "content": "E.2. Compare Judge Method" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.113, + 0.483, + 0.294 + ], + "angle": 0, + "content": "Recent works[11, 28] have shown that GPT-4o has the ability to compare two responses from models. For constraint types lacking objective evaluation metrics (such as tone requirements or role imitation), we implemented a comparative assessment method. This approach requires the model under evaluation to generate two responses: one adhering to the target constraint and another without the constraint. A judge model then analyzes both outputs to determine whether significant differences exist between them, thereby more accurately assessing whether the model has successfully followed these subjective constraints. Figure 21 illustrates the prompt used in this comparative evaluation process." + }, + { + "type": "title", + "bbox": [ + 0.095, + 0.303, + 0.292, + 0.319 + ], + "angle": 0, + "content": "E.3. Direct Judge Method" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.325, + 0.483, + 0.371 + ], + "angle": 0, + "content": "The Direct Judge method provides the constraint and answer of the model under test directly to the Judge model, and its prompt template is shown in Figure 22." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.133, + 0.116, + 0.327, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.446, + 0.104, + 0.493, + 0.141 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.112, + 0.651, + 0.133 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.156, + 0.845, + 0.228 + ], + "angle": 0, + "content": "What might have led to the dog's behavior as depicted in this image?" + }, + { + "type": "image", + "bbox": [ + 0.134, + 0.271, + 0.177, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.19, + 0.276, + 0.344, + 0.297 + ], + "angle": 0, + "content": "Constraints" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.314, + 0.55, + 0.329 + ], + "angle": 0, + "content": "1.target Audience requirement: Your audience is a dog lover." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.33, + 0.757, + 0.343 + ], + "angle": 0, + "content": "2.tense所需要的: Use present tense in the first paragraph and past tense in the second." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.344, + 0.692, + 0.358 + ], + "angle": 0, + "content": "3.tone Requirement: Adopt a reassuring, empathetic tone as if consoling someone." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.359, + 0.673, + 0.373 + ], + "angle": 0, + "content": "4.paragraph_number_limit: Your response must consist of exactly 3 paragraphs." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.374, + 0.666, + 0.388 + ], + "angle": 0, + "content": "5.mention: Mention the term 'sorry' at least twice throughout your description." 
+ }, + { + "type": "text", + "bbox": [ + 0.133, + 0.389, + 0.807, + 0.418 + ], + "angle": 0, + "content": "6highlight所需要的: Use bold for the first occurrence of the term 'aggressive behavior' in each paragraph." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.419, + 0.701, + 0.433 + ], + "angle": 0, + "content": "7wrap_up Requirement: Provide a final paragraph summarizing the key arguments." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.434, + 0.659, + 0.448 + ], + "angle": 0, + "content": "8. perspective Requirement: Please answer the question in the second person." + }, + { + "type": "list", + "bbox": [ + 0.133, + 0.314, + 0.807, + 0.448 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.158, + 0.481, + 0.837, + 0.496 + ], + "angle": 0, + "content": "Figure 7. A compose-level problem example from the MM-IFEval benchmark in the general image category." + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.518, + 0.424, + 0.679 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.458, + 0.516, + 0.504, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.525, + 0.656, + 0.544 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.53, + 0.57, + 0.87, + 0.644 + ], + "angle": 0, + "content": "Which region has the highest value of apple production? Give the answer, and analyze the reasons for the large yield of apples in this area." + }, + { + "type": "image", + "bbox": [ + 0.142, + 0.689, + 0.183, + 0.725 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.204, + 0.697, + 0.354, + 0.717 + ], + "angle": 0, + "content": "Constraints" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.74, + 0.625, + 0.757 + ], + "angle": 0, + "content": "1. precision: In the answer, plot the output in the same unit." 
+ }, + { + "type": "text", + "bbox": [ + 0.133, + 0.758, + 0.763, + 0.775 + ], + "angle": 0, + "content": "2.title所需要的: Provide a concise title that summarizes the main idea." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.776, + 0.796, + 0.812 + ], + "angle": 0, + "content": "3. perspective Requirement: Give your answer from the perspective of a Mexican agricultural expert." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.813, + 0.848, + 0.831 + ], + "angle": 0, + "content": "4.sentence_number_limit: Each paragraph should contain between 3 and 5 sentences." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.832, + 0.759, + 0.85 + ], + "angle": 0, + "content": "5. unstrict_formatting REQUIREments: Number the reasons for your analysis." + }, + { + "type": "list", + "bbox": [ + 0.133, + 0.74, + 0.848, + 0.85 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.891, + 0.831, + 0.906 + ], + "angle": 0, + "content": "Figure 8. A compose-level problem example from the MM-IFEval benchmark in the chart image category." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.163, + 0.124, + 0.396, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.468, + 0.116, + 0.513, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.523, + 0.125, + 0.661, + 0.143 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.155, + 0.865, + 0.261 + ], + "angle": 0, + "content": "In triangle ABC, D is the midpoint of BC, E is the midpoint of AD, and F is the midpoint of CE. Given that the area of triangle ABC is 28 square centimeters, consider the impact of these midpoints on the subdivisions of the triangle. 
Analyze how these midpoints affect the areas of triangles within triangle ABC and provide a detailed explanation to find the area of the shaded region that is formed within triangle BEC and triangle AEC. Finally, deduce and conclude which part of the interior triangles contribute to the shaded area." + }, + { + "type": "image", + "bbox": [ + 0.137, + 0.269, + 0.177, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.19, + 0.274, + 0.335, + 0.294 + ], + "angle": 0, + "content": "Constraints" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.318, + 0.814, + 0.333 + ], + "angle": 0, + "content": "1.target Audience requirement: Write your answer for a liberal arts student. You're tutoring her in math." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.333, + 0.632, + 0.346 + ], + "angle": 0, + "content": "2(word_count_range_limit: Please write between 150 and 200 words in total." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.347, + 0.652, + 0.361 + ], + "angle": 0, + "content": "3.paragraph_number_limit: Your response must consist of exactly 4 paragraphs." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.362, + 0.689, + 0.374 + ], + "angle": 0, + "content": "4.sentence_number_limit: Each paragraph should contain between 3 and 5 sentences." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.375, + 0.701, + 0.388 + ], + "angle": 0, + "content": "5.not Mention: Please do not mention the words 'formula' or 'equation' in your answer." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.389, + 0.715, + 0.403 + ], + "angle": 0, + "content": "6.mention: Mention the word 'midpoint' at least three times throughout your description." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.404, + 0.838, + 0.431 + ], + "angle": 0, + "content": "7.tone Requirement: Write your answer in a positive and encouraging tone, emphasizing the simplicity of the geometric concepts involved." 
+ }, + { + "type": "list", + "bbox": [ + 0.135, + 0.318, + 0.838, + 0.431 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.152, + 0.47, + 0.843, + 0.486 + ], + "angle": 0, + "content": "Figure 9. A compose-level problem example from the MM-IFEval benchmark in the geometry image category." + }, + { + "type": "image", + "bbox": [ + 0.145, + 0.591, + 0.165, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.166, + 0.592, + 0.191, + 0.604 + ], + "angle": 0, + "content": "熱門" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.613, + 0.173, + 0.635 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.615, + 0.219, + 0.634 + ], + "angle": 0, + "content": "BITCOIN BTC" + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.643, + 0.171, + 0.664 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.644, + 0.233, + 0.662 + ], + "angle": 0, + "content": "ETHEREUM ETH" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.671, + 0.172, + 0.693 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.673, + 0.235, + 0.692 + ], + "angle": 0, + "content": "TETHER U... 
USDT" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.7, + 0.174, + 0.722 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.702, + 0.206, + 0.72 + ], + "angle": 0, + "content": "USDC USDC" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.73, + 0.174, + 0.751 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.731, + 0.199, + 0.749 + ], + "angle": 0, + "content": "BNB BNB" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.758, + 0.174, + 0.779 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.76, + 0.204, + 0.777 + ], + "angle": 0, + "content": "BUSD BUSD" + }, + { + "type": "text", + "bbox": [ + 0.259, + 0.614, + 0.347, + 0.635 + ], + "angle": 0, + "content": "3,156,526.95 \\(0.76\\%\\)" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.644, + 0.323, + 0.663 + ], + "angle": 0, + "content": "86,060.91-2.64%" + }, + { + "type": "text", + "bbox": [ + 0.291, + 0.674, + 0.321, + 0.692 + ], + "angle": 0, + "content": "32.83-0.03%" + }, + { + "type": "text", + "bbox": [ + 0.29, + 0.702, + 0.321, + 0.721 + ], + "angle": 0, + "content": "32.83 -0.01%" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.731, + 0.322, + 0.75 + ], + "angle": 0, + "content": "19,024.08+0.47%" + }, + { + "type": "text", + "bbox": [ + 0.291, + 0.759, + 0.321, + 0.775 + ], + "angle": 0, + "content": "32.890.08%" + }, + { + "type": "image", + "bbox": [ + 0.33, + 0.616, + 0.347, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.645, + 0.347, + 0.659 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.448, + 0.541, + 0.494, + 0.579 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.551, + 0.649, + 0.571 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.586, + 0.861, + 0.665 + ], + "angle": 0, + "content": "If 
someone just bought the orange currency for \\(12,000 and the blue currency for \\)15,000, what is the total amount of money they have now, based on the current currency situation? Round off the decimal part of the answer." + }, + { + "type": "image", + "bbox": [ + 0.446, + 0.697, + 0.495, + 0.733 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.703, + 0.69, + 0.724 + ], + "angle": 0, + "content": "Ground Truth" + }, + { + "type": "text", + "bbox": [ + 0.653, + 0.772, + 0.726, + 0.791 + ], + "angle": 0, + "content": "26907" + }, + { + "type": "image_caption", + "bbox": [ + 0.18, + 0.878, + 0.816, + 0.894 + ], + "angle": 0, + "content": "Figure 10. A perception-level problem example from the MM-IFEval benchmark in the web category." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.154, + 0.391, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.457, + 0.141, + 0.503, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.15, + 0.654, + 0.17 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.184, + 0.86, + 0.256 + ], + "angle": 0, + "content": "In this flowchart, which node is reached after the first condition encountered from Start is judged to be Yes? Preserve the case of node names." + }, + { + "type": "image", + "bbox": [ + 0.456, + 0.293, + 0.504, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.299, + 0.694, + 0.32 + ], + "angle": 0, + "content": "Ground Truth" + }, + { + "type": "text", + "bbox": [ + 0.671, + 0.367, + 0.718, + 0.386 + ], + "angle": 0, + "content": "End" + }, + { + "type": "image_caption", + "bbox": [ + 0.166, + 0.47, + 0.831, + 0.486 + ], + "angle": 0, + "content": "Figure 11. 
A perception-level problem example from the MM-IFEval benchmark in the diagram category." + }, + { + "type": "image", + "bbox": [ + 0.108, + 0.628, + 0.437, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.459, + 0.562, + 0.502, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.57, + 0.65, + 0.59 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.604, + 0.83, + 0.692 + ], + "angle": 0, + "content": "Observe the alphabet represented by white dots and line segments in the figure. Starting from 'A', what is the second letter composed of eight white dots? Output this letter in uppercase." + }, + { + "type": "image", + "bbox": [ + 0.458, + 0.709, + 0.504, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.715, + 0.688, + 0.735 + ], + "angle": 0, + "content": "Ground Truth" + }, + { + "type": "text", + "bbox": [ + 0.679, + 0.781, + 0.697, + 0.798 + ], + "angle": 0, + "content": "G" + }, + { + "type": "image_caption", + "bbox": [ + 0.173, + 0.869, + 0.825, + 0.884 + ], + "angle": 0, + "content": "Figure 12. A perception-level problem example from the MM-IFEval benchmark in the poster category." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.116, + 0.402, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.465, + 0.113, + 0.508, + 0.148 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.52, + 0.122, + 0.655, + 0.142 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.156, + 0.852, + 0.232 + ], + "angle": 0, + "content": "Sam and Tom used the red box and Tom used the blue box. They each gave three answers. 
Would you please judge which of the two boys found more differences? Print the name of the winning boy directly." + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.26, + 0.402, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.463, + 0.262, + 0.509, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.52, + 0.267, + 0.695, + 0.288 + ], + "angle": 0, + "content": "Ground Truth" + }, + { + "type": "text", + "bbox": [ + 0.67, + 0.334, + 0.72, + 0.352 + ], + "angle": 0, + "content": "Tom" + }, + { + "type": "image_caption", + "bbox": [ + 0.137, + 0.436, + 0.859, + 0.452 + ], + "angle": 0, + "content": "Figure 13. A perception-level problem example from the MM-IFEval benchmark in the finding difference category." + }, + { + "type": "title", + "bbox": [ + 0.149, + 0.47, + 0.519, + 0.492 + ], + "angle": 0, + "content": "Instruction generation prompt" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.509, + 0.72, + 0.529 + ], + "angle": 0, + "content": "You are an expert in generating concise instructions for images." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.55, + 0.214, + 0.566 + ], + "angle": 0, + "content": "## Task" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.57, + 0.83, + 0.628 + ], + "angle": 0, + "content": "Given the image, generate a list of appropriate instructions for it. Your instructions should not be too long or overly detailed, and they should not include any specific details about the image." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.631, + 0.777, + 0.689 + ], + "angle": 0, + "content": "On one hand, you can choose appropriate instructions cases for the provided image from the Examples and modify them naturally for the image." 
+ }, + { + "type": "text", + "bbox": [ + 0.136, + 0.691, + 0.83, + 0.73 + ], + "angle": 0, + "content": "On the other hand, you can generate new instructions, but only if these new instructions are relevant and appropriate for the image." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.75, + 0.26, + 0.769 + ], + "angle": 0, + "content": "Examples" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.771, + 0.376, + 0.79 + ], + "angle": 0, + "content": "{original instructions list}" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.81, + 0.622, + 0.829 + ], + "angle": 0, + "content": "You output format should be in the following format:" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.831, + 0.284, + 0.85 + ], + "angle": 0, + "content": "{output format}" + }, + { + "type": "image_caption", + "bbox": [ + 0.173, + 0.894, + 0.822, + 0.909 + ], + "angle": 0, + "content": "Figure 14. Prompt template for image generation instructions using a large language model in MM-IFEngine." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.15, + 0.289, + 0.52, + 0.311 + ], + "angle": 0, + "content": "Constraint integration prompt" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.32, + 0.663, + 0.335 + ], + "angle": 0, + "content": "You are an expert in add appropriate constraints to the instruction for images." + }, + { + "type": "title", + "bbox": [ + 0.144, + 0.349, + 0.203, + 0.361 + ], + "angle": 0, + "content": "Task" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.363, + 0.776, + 0.378 + ], + "angle": 0, + "content": "Given the original instruction, your task is to expand the instruction by adding constraints to it." 
+ }, + { + "type": "text", + "bbox": [ + 0.142, + 0.378, + 0.87, + 0.406 + ], + "angle": 0, + "content": "You can select **as many as possible** appropriate types of constraints from the given **Constraints List** below and modify them. However, ensure that the constraints you generate meet the following requirements:" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.407, + 0.563, + 0.42 + ], + "angle": 0, + "content": "1. Maintain the thematic consistency of the original instruction." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.421, + 0.628, + 0.435 + ], + "angle": 0, + "content": "2.Be relevant and appropriate for the original instruction and be concise." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.435, + 0.578, + 0.449 + ], + "angle": 0, + "content": "3. Do not conflict with the original instruction or with each other." + }, + { + "type": "list", + "bbox": [ + 0.145, + 0.407, + 0.628, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.463, + 0.861, + 0.536 + ], + "angle": 0, + "content": "For example, if the original instruction is a simple query like \"What color is the truck?\" you should avoid adding constraints such as \"Your response must be in three paragraphs,\" as such a requirement is unnecessary for a short and simple question. Moreover, if the original instruction is a question like \"What is the object in the image?\", you should avoid adding constraints such as \"Respond in the second-person to directly address the reader,\" as it conflicts with the original instruction." 
+ }, + { + "type": "code", + "bbox": [ + 0.144, + 0.55, + 0.301, + 0.58 + ], + "angle": 0, + "content": "Original Instruction {originalInstruction}" + }, + { + "type": "code", + "bbox": [ + 0.144, + 0.593, + 0.518, + 0.638 + ], + "angle": 0, + "content": "```java\n## Constraints List\nYour added constraints can be from the following types:\n{constraints_list_str}" + }, + { + "type": "code", + "bbox": [ + 0.144, + 0.65, + 0.572, + 0.681 + ], + "angle": 0, + "content": "Output Format Your output should follow the format below: {output format}" + }, + { + "type": "image_caption", + "bbox": [ + 0.283, + 0.718, + 0.713, + 0.733 + ], + "angle": 0, + "content": "Figure 15. prompt template for integrating constraints in MM-IFEngine." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.167, + 0.276, + 0.422, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.127, + 0.381, + 0.159, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.39, + 0.243, + 0.403 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.417, + 0.443, + 0.446 + ], + "angle": 0, + "content": "Write a short poem that captures the hustle and bustle of city life depicted in this image." + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.472, + 0.145, + 0.498 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.158, + 0.476, + 0.243, + 0.489 + ], + "angle": 0, + "content": "Constraints" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.504, + 0.417, + 0.536 + ], + "angle": 0, + "content": "1. Write each stanza so it includes a parenthetical phrase." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.538, + 0.41, + 0.552 + ], + "angle": 0, + "content": "2.Each stanza should have 4 lines." 
+ }, + { + "type": "text", + "bbox": [ + 0.157, + 0.554, + 0.354, + 0.569 + ], + "angle": 0, + "content": "3.Use bold for every noun." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.571, + 0.434, + 0.618 + ], + "angle": 0, + "content": "4.Start the poem with 'In the heart of the city...' and end with '...the city never sleeps.'" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.62, + 0.422, + 0.651 + ], + "angle": 0, + "content": "5.Use present tense throughout the poem." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.653, + 0.434, + 0.685 + ], + "angle": 0, + "content": "6.Use the third-person perspective to maintain an objective viewpoint." + }, + { + "type": "list", + "bbox": [ + 0.156, + 0.504, + 0.434, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.483, + 0.294, + 0.532, + 0.323 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.533, + 0.306, + 0.591, + 0.318 + ], + "angle": 0, + "content": "Answer" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.355, + 0.863, + 0.381 + ], + "angle": 0, + "content": "In the heart of the city, where the \\*\\*streets\\*\\* hum with life." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.384, + 0.783, + 0.397 + ], + "angle": 0, + "content": "People rush (beneath the bright blue sky)," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.398, + 0.804, + 0.412 + ], + "angle": 0, + "content": "Buses pass (\\*\\*tires\\*\\* whispering on the road)," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.412, + 0.753, + 0.426 + ], + "angle": 0, + "content": "Every moment feels like a lively show." 
+ }, + { + "type": "text", + "bbox": [ + 0.505, + 0.44, + 0.78, + 0.453 + ], + "angle": 0, + "content": "The \\*\\*signs\\*\\* stand tall amidst the crowd," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.454, + 0.808, + 0.468 + ], + "angle": 0, + "content": "An ambulance speeds by (with urgent sound)," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.469, + 0.753, + 0.482 + ], + "angle": 0, + "content": "Newspapers rest (in their quiet stack)," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.483, + 0.798, + 0.496 + ], + "angle": 0, + "content": "As footsteps weave through the urban track." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.51, + 0.732, + 0.524 + ], + "angle": 0, + "content": "Buildings rise, their facades grand," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.525, + 0.736, + 0.538 + ], + "angle": 0, + "content": "A man walks past (across the land)," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.539, + 0.721, + 0.552 + ], + "angle": 0, + "content": "Voices blend into ambient noise," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.553, + 0.707, + 0.567 + ], + "angle": 0, + "content": "Life unfolds without any poise." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.581, + 0.744, + 0.595 + ], + "angle": 0, + "content": "Shoppers carry bags (full of delight)," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.596, + 0.786, + 0.609 + ], + "angle": 0, + "content": "Sunlight dances (on the pavement's sight)," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.61, + 0.728, + 0.624 + ], + "angle": 0, + "content": "Each face tells tales of daily grind," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.625, + 0.833, + 0.638 + ], + "angle": 0, + "content": "Yet spirits soar, unconfined....the city never sleeps." + }, + { + "type": "image_caption", + "bbox": [ + 0.264, + 0.713, + 0.733, + 0.728 + ], + "angle": 0, + "content": "Figure 16. 
A sample constructed by MM-IFEngine pipeline from cc3m dataset" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.156, + 0.261, + 0.383, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.158, + 0.396, + 0.236, + 0.408 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.432, + 0.387, + 0.446 + ], + "angle": 0, + "content": "What treat can be baked in an oven?" + }, + { + "type": "image", + "bbox": [ + 0.108, + 0.469, + 0.141, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.152, + 0.473, + 0.236, + 0.486 + ], + "angle": 0, + "content": "Constraints" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.495, + 0.417, + 0.538 + ], + "angle": 0, + "content": "1. Write your answer for a general adult audience with limited baking experience, providing clear and simple explanations." + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.538, + 0.406, + 0.58 + ], + "angle": 0, + "content": "2. Write your answer in a positive and encouraging tone, making the baking process seem accessible and enjoyable." + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.58, + 0.425, + 0.623 + ], + "angle": 0, + "content": "3. Mention both 'oven' and 'baking' at least once in your response, ensuring they are used naturally and contextually." + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.623, + 0.411, + 0.664 + ], + "angle": 0, + "content": "4. Begin every sentence with an exclamation mark to convey excitement about baking." + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.665, + 0.421, + 0.707 + ], + "angle": 0, + "content": "5. Your response must consist of exactly 3 paragraphs, each focusing on a different type of treat that can be baked in an oven." 
+ }, + { + "type": "list", + "bbox": [ + 0.15, + 0.495, + 0.425, + 0.707 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.475, + 0.264, + 0.523, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.525, + 0.275, + 0.581, + 0.287 + ], + "angle": 0, + "content": "Answer" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.304, + 0.85, + 0.435 + ], + "angle": 0, + "content": "!One delightful treat you can bake in an oven is cookies! Whether you prefer classic chocolate chip or something more adventurous like oatmeal raisin, baking cookies is a fun and rewarding activity. The aroma of freshly baked cookies wafting through your home will make everyone excited!" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.436, + 0.855, + 0.567 + ], + "angle": 0, + "content": "! Another scrumptious option for oven baking is muffins! Muffins are perfect for breakfast or as a snack, and you can customize them with various ingredients such as blueberries, bananas, or even chocolate chips. Baking muffins allows you to experiment with flavors while enjoying the satisfaction of creating something delicious from scratch." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.568, + 0.853, + 0.7 + ], + "angle": 0, + "content": "! Lastly, consider baking a cake! Cakes come in countless varieties, from simple vanilla to rich chocolate, and can be decorated to suit any occasion. Baking a cake not only provides a sweet treat but also enhances your skills in mixing, measuring, and decorating. So grab your apron and get ready to enjoy the magic of baking in your own kitchen!" + }, + { + "type": "image_caption", + "bbox": [ + 0.261, + 0.739, + 0.736, + 0.754 + ], + "angle": 0, + "content": "Figure 17. 
A sample constructed by MM-IFEngine pipeline from Allava dataset" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.214, + 0.258, + 0.335, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.352, + 0.151, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.155, + 0.363, + 0.234, + 0.374 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.381, + 0.434, + 0.493 + ], + "angle": 0, + "content": "In the diagram, quadrilateral ABCD is inscribed in circle O with diameter BE and line AE connected to it. Given that the measure of angle BCD is twice the measure of angle BAD, and that angle DAE is equal to angle CAD, develop a comprehensive geometric proof to determine the degree measure of angle DAE." + }, + { + "type": "image", + "bbox": [ + 0.108, + 0.499, + 0.138, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.151, + 0.502, + 0.235, + 0.514 + ], + "angle": 0, + "content": "Constraints" + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.531, + 0.418, + 0.579 + ], + "angle": 0, + "content": "1. Your response must consist of exactly 3 paragraphs, each focusing on a different aspect of the proof: the initial setup, the application of theorems, and the final calculation." + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.58, + 0.424, + 0.616 + ], + "angle": 0, + "content": "2. Each paragraph should contain between 4 and 6 sentences, ensuring a balanced and detailed explanation." + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.617, + 0.406, + 0.653 + ], + "angle": 0, + "content": "3.The response must be between 120 and 150 words in total, maintaining clarity and conciseness." 
+ }, + { + "type": "list", + "bbox": [ + 0.149, + 0.58, + 0.424, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.654, + 0.414, + 0.679 + ], + "angle": 0, + "content": "4. Write your answer in a clear and methodical tone, suitable for a high school geometry class." + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.679, + 0.422, + 0.716 + ], + "angle": 0, + "content": "5.Provide a concise title that summarizes the main idea of the proof, such as 'Geometric Proof for Angle DAE'." + }, + { + "type": "image", + "bbox": [ + 0.476, + 0.266, + 0.525, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.526, + 0.277, + 0.582, + 0.289 + ], + "angle": 0, + "content": "Answer" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.314, + 0.861, + 0.697 + ], + "angle": 0, + "content": "**Geometric Proof for Angle DAE** To begin our proof, we note that since quadrilateral ABCD is inscribed in circle O with BE as its diameter, angle ABE is a right angle (90 degrees) by the Inscribed Angle Theorem. This theorem states that an angle inscribed in a semicircle is always a right angle. Additionally, given that angle BCD is twice angle BAD, let's denote angle BAD as \\(\\backslash(x\\backslash)\\). Therefore, angle BCD measures \\(\\backslash(2x\\backslash)\\). \\(\\backslash\\)n\\NNext, applying the properties of cyclic quadrilaterals, we know that opposite angles sum up to 180 degrees. Thus, angle BAD plus angle BCD equals 180 degrees. Substituting the values, we get \\(\\backslash(x + 2x = 180^{\\wedge}\\backslash\\text{circ} \\backslash)\\), leading to \\(\\backslash(3x = 180^{\\wedge}\\backslash\\text{circ} \\backslash)\\) or \\(\\backslash(x = 60^{\\wedge}\\backslash\\text{circ} \\backslash)\\). Since angle DAE is equal to angle CAD, and considering triangle ADE where AD is common, angle DAE also measures 30 degrees because angle CAD is half of angle BAD due to symmetry and the given condition. 
Finally, calculating the exact measure of angle DAE involves recognizing that angle CAD is half of angle BAD, which was determined to be 60 degrees. Hence, angle CAD is 30 degrees. Consequently, angle DAE, being congruent to angle CAD, also measures 30 degrees. This completes our proof, demonstrating that angle DAE indeed measures 30 degrees based on the provided conditions and geometric principles applied throughout the process." + }, + { + "type": "image_caption", + "bbox": [ + 0.255, + 0.74, + 0.743, + 0.755 + ], + "angle": 0, + "content": "Figure 18. A sample constructed by MM-IFEngine pipeline from geo170k dataset" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.139, + 0.101, + 0.357, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.214, + 0.141, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.15, + 0.224, + 0.2, + 0.235 + ], + "angle": 0, + "content": "prompt" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.248, + 0.346, + 0.392 + ], + "angle": 0, + "content": "What might the seabird be doing flying alone over the ocean? Respond in the third-person perspective to maintain an objective viewpoint. Write the answer in the form of a brief nature observation log entry. Each paragraph should contain between 3 and 5 sentences. Write your answer for a general adult audience with an interest in wildlife and nature. Conclude with a single sentence summarizing the likely behavior of the seabird." 
+ }, + { + "type": "image", + "bbox": [ + 0.371, + 0.11, + 0.404, + 0.135 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.41, + 0.118, + 0.452, + 0.129 + ], + "angle": 0, + "content": "Chosen" + }, + { + "type": "text", + "bbox": [ + 0.471, + 0.117, + 0.551, + 0.131 + ], + "angle": 0, + "content": "Score:5/5" + }, + { + "type": "text", + "bbox": [ + 0.383, + 0.149, + 0.623, + 0.374 + ], + "angle": 0, + "content": "In today's observation, a solitary seabird was noted gliding gracefully above the vast expanse of the ocean. The bird's wings were fully extended, indicating efficient use of air currents to conserve energy while searching its surroundings. Seabirds often fly alone when scouting for food sources such as schools of fish or marine organisms near the surface. This lone flight could also serve purposes like territorial patrol or simply enjoying favorable wind conditions. In conclusion, the seabird is most likely engaged in foraging activities." + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.108, + 0.668, + 0.136 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.672, + 0.116, + 0.726, + 0.128 + ], + "angle": 0, + "content": "Rejected" + }, + { + "type": "text", + "bbox": [ + 0.738, + 0.115, + 0.818, + 0.129 + ], + "angle": 0, + "content": "Score:3/5" + }, + { + "type": "text", + "bbox": [ + 0.646, + 0.145, + 0.789, + 0.155 + ], + "angle": 0, + "content": "**Nature Observation Log Entry**" + }, + { + "type": "text", + "bbox": [ + 0.647, + 0.156, + 0.74, + 0.163 + ], + "angle": 0, + "content": "**Date:** [Insert Date]" + }, + { + "type": "text", + "bbox": [ + 0.647, + 0.164, + 0.772, + 0.172 + ], + "angle": 0, + "content": "**Location:** [Insert Location]" + }, + { + "type": "list", + "bbox": [ + 0.646, + 0.145, + 0.789, + 0.172 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.646, + 0.173, + 0.882, + 0.208 + ], + "angle": 0, + "content": "**Observation:** A 
solitary seabird, possibly a seagull or pelican, is seen soaring gracefully above the vast expanse of the ocean. The bird's wings are spread wide, indicating it is likely gliding or flying at a steady pace." + }, + { + "type": "text", + "bbox": [ + 0.646, + 0.209, + 0.882, + 0.28 + ], + "angle": 0, + "content": "The water below appears calm, with gentle ripples on its surface, suggesting a serene environment. The bird's position and posture suggest it may be searching for food, such as fish or crustaceans, that are abundant in the ocean's depths. The absence of other birds or marine life in the immediate vicinity could imply that this bird is either hunting alone or simply enjoying a solitary flight over the sea." + }, + { + "type": "text", + "bbox": [ + 0.646, + 0.281, + 0.877, + 0.308 + ], + "angle": 0, + "content": "\\*\\*Analysis:\\*\\* Seabirds like seagulls and pelicans are known for their ability to glide and soar on the wind currents, conserving energy while searching for food." + }, + { + "type": "text", + "bbox": [ + 0.646, + 0.309, + 0.875, + 0.356 + ], + "angle": 0, + "content": "The bird's choice of location, over the ocean, indicates its reliance on marine resources for sustenance. The calmness of the water suggests favorable weather conditions, which are essential for successful hunting and gliding. This observation provides insight into the key role that birds play in the formation of such environments." + }, + { + "type": "text", + "bbox": [ + 0.646, + 0.362, + 0.877, + 0.38 + ], + "angle": 0, + "content": "highlighting their adaptability to oceanic environments and their role in the marine ecosystem." + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.429, + 0.825, + 0.443 + ], + "angle": 0, + "content": "Figure 19. 
A DPO training set sample, where the rejected data is obtained by removing \\(33\\%\\) of the constraints" + }, + { + "type": "title", + "bbox": [ + 0.129, + 0.468, + 0.646, + 0.487 + ], + "angle": 0, + "content": "Prompt to choose verified function and extract params" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.521, + 0.84, + 0.573 + ], + "angle": 0, + "content": "Please analyze the following constraint and select the most appropriate function from the given list to verify this constraint. Then extract the required parameters for the verification function from the constraint." + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.592, + 0.455, + 0.61 + ], + "angle": 0, + "content": "Constraint content: {constraint value}" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.627, + 0.614, + 0.663 + ], + "angle": 0, + "content": "Available verification functions: {all candidate validation function names and parameters}" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.679, + 0.559, + 0.696 + ], + "angle": 0, + "content": "Please complete the analysis following these steps:" + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.697, + 0.264, + 0.712 + ], + "angle": 0, + "content": "**Your task:**" + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.715, + 0.851, + 0.749 + ], + "angle": 0, + "content": "1. Select the most appropriate verification function from the above list (return empty if none is suitable)" + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.75, + 0.767, + 0.784 + ], + "angle": 0, + "content": "2. 
Extract the required parameters from the constraint based on the function description" + }, + { + "type": "list", + "bbox": [ + 0.147, + 0.715, + 0.851, + 0.784 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.802, + 0.586, + 0.838 + ], + "angle": 0, + "content": "**Please return the result in JSON format as follows:** {output format}" + }, + { + "type": "image_caption", + "bbox": [ + 0.208, + 0.892, + 0.789, + 0.907 + ], + "angle": 0, + "content": "Figure 20. Prompt template for automated verification function selection and paramater extraction" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.16, + 0.293, + 0.451, + 0.316 + ], + "angle": 0, + "content": "Compare Judge Prompt" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.335, + 0.842, + 0.418 + ], + "angle": 0, + "content": "You are an expert in judging whether the response follows the given constraint. Your task is to assess whether the model's response satisfies the given constraint and return True or False. I will provide you with the constraint and the model's response under this constraint. To assist with your evaluation, I will also provide you with the model's response to the same question without the constraint." 
+ }, + { + "type": "text", + "bbox": [ + 0.145, + 0.42, + 0.336, + 0.436 + ], + "angle": 0, + "content": "Constraint: {constraint}" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.437, + 0.576, + 0.453 + ], + "angle": 0, + "content": "Response under the constraint: {pred_with Constraint}" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.454, + 0.615, + 0.47 + ], + "angle": 0, + "content": "Response without the constraint: {pred Without constraint}" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.487, + 0.503, + 0.503 + ], + "angle": 0, + "content": "**Please follow the steps below to evaluate**:" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.504, + 0.842, + 0.554 + ], + "angle": 0, + "content": "Step 1. Compare the model's response under the constraint with its response without the constraint. If you believe these two answers are very similar, it means the model has not fully considered the impact of the constraint on the answer. Please return False." + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.555, + 0.858, + 0.62 + ], + "angle": 0, + "content": "Step 2. Compare the model's response under the constraint with the content of the constraint. If you believe the model's response does not meet the requirements specified in the constraint, return False. Otherwise, if the response effectively satisfies the constraint, return True." + }, + { + "type": "list", + "bbox": [ + 0.144, + 0.504, + 0.858, + 0.62 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.622, + 0.772, + 0.655 + ], + "angle": 0, + "content": "**Response Format**: Your answer should only include \"True\" or \"False\", and no additional text." + }, + { + "type": "image_caption", + "bbox": [ + 0.332, + 0.718, + 0.665, + 0.733 + ], + "angle": 0, + "content": "Figure 21. 
Prompt template for Compare Judge Method" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.14, + 0.295, + 0.393, + 0.318 + ], + "angle": 0, + "content": "Direct Judge Prompt" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.339, + 0.837, + 0.374 + ], + "angle": 0, + "content": "Your task is to evaluate whether the response from an AI assistant adheres to all of the given constraints." + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.376, + 0.633, + 0.392 + ], + "angle": 0, + "content": "Please follow the requirements below to make the judgment:" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.394, + 0.514, + 0.409 + ], + "angle": 0, + "content": "1. Be strict and consistent in your assessment." + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.411, + 0.668, + 0.427 + ], + "angle": 0, + "content": "2. You should refer to the content of image to make the judgment." + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.428, + 0.851, + 0.444 + ], + "angle": 0, + "content": "3. For one constraint, if the response fails to fully meet the constraint, give it a score of 0." + }, + { + "type": "list", + "bbox": [ + 0.142, + 0.394, + 0.851, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.446, + 0.551, + 0.462 + ], + "angle": 0, + "content": "Otherwise, give it a score of 1. 
" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.464, + 0.241, + 0.48 + ], + "angle": 0, + "content": "{prediction}" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.482, + 0.304, + 0.497 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.499, + 0.35, + 0.513 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.516, + 0.278, + 0.532 + ], + "angle": 0, + "content": "{constraints_str}" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.534, + 0.337, + 0.548 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.551, + 0.841, + 0.584 + ], + "angle": 0, + "content": "You should judge and explain for each constraint in the constraint list without omitting any constraint. Finally, list scores of all the constraints in one sentence." + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.586, + 0.494, + 0.601 + ], + "angle": 0, + "content": "You should strictly follow the format below:" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.604, + 0.253, + 0.619 + ], + "angle": 0, + "content": "Judgement: ..." + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.621, + 0.833, + 0.655 + ], + "angle": 0, + "content": "Summary: Score of constraint_1: x/1, Score of constraint_2: x/1, Score of constraint_3: x/1, ..., Score of constraint_n: x/1." + }, + { + "type": "image_caption", + "bbox": [ + 0.343, + 0.712, + 0.656, + 0.726 + ], + "angle": 0, + "content": "Figure 22. Prompt template for Direct Judge Method" + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.2, + 0.224, + 0.797, + 0.737 + ], + "angle": 0, + "content": "
Main ClassSubclassEvaluationDescriptionExample
A. Rhetoric & LogicA.1 Rhetoric requirementsCompare JudgeConstraint that requires the response to use a specific rhetorical technique.“Your output should include a metaphor.”
A.2 Logical relationDirect JudgeConstraint that ensures logical cohesion within the response by requiring specific logical connectors or structures.“Each paragraph must contain at least one cause-and-effect relationship.”
B. Format limitB.1 Natural languageDirect JudgeConstraint specifying which natural language(s) should be used in the response.“Please answer in Spanish.”
B.2 Part of speechDirect JudgeConstraint that requires the response to use a specific part of speech.“Use at least three adjectives in your response.”
B.3 Sentence structureDirect JudgeConstraint that specifies special sentence structures to be used in the response.“Write each sentence so it includes a parenthetical phrase.”
B.4 Tense requirementsDirect JudgeConstraint that specifies the use of multiple tenses within the response.“In past tense totally.”
B.5 PunctuationRule-baseConstraint specifying unconventional yet feasible punctuation usage in the response.“Replace all periods with semicolons.”
B.6 HighlightDirect JudgeConstraint that specifies a unique but manageable method for highlighting text.“Use **bold** for every noun.”
B.7 Title requirementsDirect JudgeConstraint that specifies how titles should be added to the response.“Provide a concise title that summarizes the main idea.”
B.8 Style requirementsCompare JudgeConstraint that specifies an unconventional or distinctive writing style for the response.“Write the answer in the form of a brief detective story.”
B.9 Case requirementsDirect JudgeConstraint specifying an unusual yet readable approach to letter case in the response.“Write all nouns in UPPERCASE and all adjectives in lowercase.”
B.10 Unstrict formatDirect JudgeConstraint specifying a unique format for the output while keeping it approachable.“Format your response as a short play script with speaker labels.”
B.11 Strict formatDirect JudgeConstraint that requires the response to follow a strictly defined format.“Please provide the output as well-formed XML with custom tags.”
B.12 Number and ListDirect JudgeConstraint for using numbered or bulleted lists in the response.“Present all key points as a numbered list with bulleted sub-lists.”
B.13 Wrap upDirect JudgeConstraint that requires a concise, well-structured summary or conclusion.“Provide a final paragraph summarizing the key arguments.”
B.14 First letterDirect JudgeConstraint specifying a pattern for the first letters of sentences or paragraphs.“Each sentence should begin with a letter that progresses through the alphabet.”
C. Text Length limitC.1 Paragraph limitRule-baseConstraint that specifies the number of paragraphs in the response.“Your response must consist of exactly 4 paragraphs.”
C.2 Sentence limitRule-baseConstraint that specifies the number of sentences in each paragraph.“Totally use 5 sentences in your response.”
C.3 Word limitRule-baseConstraint that specifies a small range for the total number of words in the text.“Your response must be a single word or phrase.”
D. Math limitD.1 PrecisionRule-baseConstraint that specifies the level of precision required in mathematical calculations.“Keep two decimal places for all numbers in the answer.”
D.2 Scientific notationRule-baseConstraint that requires the use of scientific notation for large or small numbers.“Express all numbers greater than 1,000 in scientific notation.”
E. Action limitE.1 Role imitationCompare JudgeConstraint requiring the response to imitate the tone and style of a specific role or public figure.“Please answer in the style of a sports commentator.”
E.2 Prefix and SuffixRule-baseConstraint that requires the response to begin or end with a specific phrase or symbol.“Please start your answer with ‘Once upon a time...’”
E.3 Tone requirementCompare JudgeConstraint specifying an emotional tone for the response.“Write your answer in a positive and encouraging tone.”
E.4 PerspectiveDirect JudgeConstraint that specifies a narrative perspective for the response.“Write your answer in the first-person singular as a personal account.”
E.5 Target audienceCompare JudgeConstraint requiring the response to be tailored for a specific audience.“Craft your response as if explaining to high school students.”
E.6 SituationCompare JudgeConstraint requiring the response to be set in a specific situation or scenario.“Answer as if you are giving safety instructions before a flight.”
E.7 Prior conditionDirect JudgeConstraint stating that when a specific condition is met, the response must follow a particular process.“If the user requests legal advice, begin with a disclaimer.”
F. KeywordF.1 MentionRule-base & Direct JudgeConstraint that requires including a specific keyword a certain number of times.“Mention ‘GreenTech’ exactly three times throughout.”
F.2 Not mentionRule-base & Direct JudgeConstraint that requires avoiding specific keywords or phrases.“Do not mention the words ‘budget’ or ‘investment’.”
F.3 Multiple mentionRule-base & Direct JudgeConstraint requiring including multiple specified keywords in a balanced manner.“Mention both ‘sustainability’ and ‘renewable energy’ at least twice.”
F.4 Keyword variationDirect JudgeConstraint requiring the use of synonyms or variations of a given keyword.“Use at least three synonyms for ‘innovation’ throughout your text.”
" + }, + { + "type": "table_caption", + "bbox": [ + 0.287, + 0.748, + 0.71, + 0.761 + ], + "angle": 0, + "content": "Table 5. Constraint Categories and Evaluation Methods for MM-IFEval" + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.218, + 0.324, + 0.781, + 0.637 + ], + "angle": 0, + "content": "
CategoryInstruction
Descriptive AnalysisDescribe the animal's typical habitat, diet, and one unique behavioral trait.
Provide a detailed analysis of the image, including the setting, characters, and notable objects.
Explain the activity taking place in the image.
Describe the activities of the person on the left in the image.
Emotional & PerspectiveWhat emotions do you think the person in this image might be feeling?
Imagine you are the person on the left in the scene depicted in this image, write a story about what you would do next.
Personify the sign in the image and express its feelings about the rule it presents.
Creative WritingCreate a short conversation between any two individuals in the scene.
Pretend this snapshot belongs to a larger story. Write a quick paragraph setting up the next plot twist.
Use this picture as your muse. Craft a brief poem—any style—that captures the emotion you sense.
Turn this scene into a short children's story focusing on wonder and curiosity.
Write a short poem with two stanzas, inspired by the emotion or content depicted in this image.
Social Media & ContentAssume this is an image you are about to post on Twitter. Please provide a short, upbeat caption describing it.
Assume you are creating a Pinterest pin with this image. Write a short inspirational or motivational caption to accompany it.
If this image were promoting an upcoming event, compose a quick announcement with the date, a highlight of what to expect, and a call-to-action.
Role PlayImagine you are the photographer who took this picture. Briefly explain why you chose to capture this particular moment and what story you hope it conveys.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.387, + 0.649, + 0.61, + 0.662 + ], + "angle": 0, + "content": "Table 6. Task Pool for MM-IFEngine" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.166, + 0.226, + 0.833, + 0.734 + ], + "angle": 0, + "content": "
Verified Function NameFunction ParametersConstraint ExampleParameter Example
check Whether\\_responseParagraph\\_number_in_rangelower_bound:int,upper_bound:intThe number of text paragraphs be at least 3[3, 10000]
check Whether\\_response\\_sentence\\_number_in_rangelower_bound:int,upper_bound:intThe number of sentences be exactly 3[3, 3]
check Whether\\_each\\_paragraph\\_sentence\\_number_in_rangelower_bound:int,upper_bound:intThe number of sentences in each paragraph be less than 3[0, 2]
check Whether\\_each\\_paragraph\\_sentence\\_number_in_range_listranges:List[tuple]The number of sentences in the first paragraph be exactly 3, and in the second paragraph be at most 2[(3, 3), (1, 2)]
check Whether\\_each\\_paragraph\\_sentence\\_number_exceedsexceed_num:int,upper_bound:intEach new paragraph should have 1 sentence more than the previous one, no paragraph exceeds 7 sentences[1, 7]
check Whether\\_response_word_count_in_rangelower_bound:int,upper_bound:intThe number of words should be between 50 and 80[50, 80]
check Whether\\_each\\_paragraph\\_word_count_in_rangelower_bound:int,upper_bound:intThe number of words in each paragraph should be between 50 and 80[50, 80]
check Whether\\_each\\_paragraph\\_word_count_in_range_listranges:List[tuple]The number of words in the first paragraph be between 20 and 30, in the second between 50 and 80[(20, 30), (50, 80)]
check Whether\\_whole\\_response_notContain_certain_substringsubstring:strThe response should not contain the word "apple"["apple"]
check Whether\\_whole\\_response_notContain_certain_substringssubstrings:List[str]The response should not contain the words "apple" and "banana"[["apple", "banana"]]
check Whether\\_each\\_sentence_begin_with_certain_substringsubstring:strEach sentence should start with exclamation point["!"]
check Whether\\_each\\_sentence_end_with_certain_substringsubstring:strEach sentence should end with "apple"["apple"]}
check Whether\\_whole\\_response_begin_with_certain_substringsubstring:strThe response should start with "apple"["apple"]}
check Whether\\_whole\\_response_end_with_certain_substringsubstring:strThe response should end with "apple"["apple"]}
check Whether\\_keywords_metioned_in_rangekeywords:List[str], lower_bound(times:int, upper_bound(times):intThe response should mention the word "apple" at least 3 times[["apple"], 3, 10000]
check_number_precision_in_responseprecision:intThe numbers in the response should have 2 decimal places[2]
check Whether has no\\_number_in_response-The response should not contain any number[]
check Scientific_notation\\_precision_in_responsesignificantDigits:intThe numbers in the response should have 3 significant digits[3]
" + }, + { + "type": "table_caption", + "bbox": [ + 0.263, + 0.745, + 0.735, + 0.757 + ], + "angle": 0, + "content": "Table 7. Verification Functions for rule-based evaluation method in MM-IFEval" + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "17" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_origin.pdf b/data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..745e5841296c4eacdc072432bc90af464d0138d7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99a1dd220829f6c950e527e452bc9b5cd20c94ff1ae64aef77d703f705b3ebdd +size 6629907 diff --git a/data/2025/2504_07xxx/2504.07957/full.md b/data/2025/2504_07xxx/2504.07957/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d98f92c09e4d95e21a187fd96ba959f86efce891 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/full.md @@ -0,0 +1,847 @@ +# MM-IFEngine: Towards Multimodal Instruction Following + +Shengyuan Ding $^{1,2*}$ , Shenxi Wu $^{1,2*}$ , Xiangyu Zhao $^{2,3}$ , Yuhang Zang $^{2\boxtimes}$ , Haodong Duan $^{2}$ , Xiaoyi Dong $^{2}$ , Pan Zhang $^{2}$ , Yuhang Cao $^{2}$ , Dahua Lin $^{2,4,5}$ , Jiaqi Wang $^{2,6\boxtimes}$ $^{1}$ Fudan University $^{2}$ Shanghai AI Laboratory $^{3}$ Shanghai Jiaotong University $^{4}$ The Chinese University of Hong Kong $^{5}$ CPII under InnoHK $^{6}$ Shanghai Innovation Institute + +![](images/f8b7a3511b4b56a4319ce0f5af8de1c97cdad29b7f56da0d870eccc27e2792ee.jpg) +(a) Current MMIF Bench +1. Answer as if you are facing to the audience. +2. Use No more than 60 words.... +Figure 1. (a) Limitations of existing Multimodal Instruction Following (IF) benchmarks. 
(b) Overview of the MM-IFEval benchmark, which significantly surpasses existing benchmarks in terms of constraint diversity, quantity, and instruction complexity. Our benchmark consists of Compose-Level (C-Level) problems that impose constraints on model outputs (e.g., format requirements, keyword limits) and Perception-Level (P-Level) problems that require reasoning about specific visual elements in images. (c) Our MM-IFEngine generates a large-scale, diverse training dataset suitable for both Supervised Fine-Tuning (SFT) and Direct Preference Optimization (DPO). + +![](images/ee039d219f172204d34b079a06e82c4fb8fe851e2819d7c69e5891a8db8ae899.jpg) +Various & Abundant + +![](images/a378f0667b9cc7382a9e9850a7ce0d7cf5d50edbb696014f7c83556a3823502a.jpg) +Constraints +MTA-Bench (About 1k constraints) +(300 questions) + +![](images/a3c586654cc1fd75bb35dbd94a5d9a308b95eadf05ded748ca85608e32953d6f.jpg) +(b) MM-IFEval Benchmark +follow instruction + +To Say +you are the musician +image. Write about your +s and feelings while +ing. + +![](images/1146acfca5d9086061b4a464479e45f75eb2dbde1aae95a753a84a6280d54ded.jpg) +Constraints + +![](images/9482868896c9f2e21df0a05a6e6a138d23d6f7c7a0877327bcb605e2309fee18.jpg) +32 Categories of Constraints +5.1 Average constraints + +![](images/01829f3201f86c5241247f1c7856596385d02be6106a723acf901c1d6a6e9bde.jpg) +3 Evaluation metrics combined + +![](images/9bc358e345c9b98ea3fb38fc86ce45afa38fa2fcdfcb276c74e120a498986945.jpg) + +![](images/b8a348f6987610ae0c95cad2a4e302007efae81f1c69689c59abf0b69f401b43.jpg) +400 high-quality samples +100 Perception-Level + +To See letters can you identify covering the right half e poster? 
Output in from top to bottom and o right separated with ' + +R,e,a,d,i,n,a,f,u + +![](images/5d2246d97d8d1fe22d28c2487f238cb66c3ff7dafcb28d5e72d52ca7359ca660.jpg) +Instruction +Ground Truth + +![](images/c591eb706a2d3bf120b3452026a44ca6fc90941ddf2eea9b2d4a992aafbd80ed.jpg) +(c) MM-IF Dataset SFT & DPO +23kData generated with our MMTE Engine + +![](images/75cfe8b804245d7779058c9694eb4ff6df96eb7847bb5c12936a31f85a800642.jpg) +Compose a brief poem inspired by the cozy and serene. + +![](images/811b4f1336891501d9f68e8fa390c7b31be3c17edd73089b93c5ac3750611e0a.jpg) +Each stanza should have 4 lines. Your output should include a metaphor +In a room where light gently plays, +A haven carved from nature's ways. +The river whispers calm and clear, +Serene as thoughts that banish fear... + +![](images/a893f351b5971c3de588c7867fd7f15eec8696742e9e1f11d2c17a7a3e056f51.jpg) +In a world of hustle and bustle +A haven of peace and solitude. +Soft curtains dance in the breeze. As the sun's rays gently caress. + +![](images/782f241997aa01eaabff7691aa2404853a7d7988ff1031db0803c4d8801ec1d3.jpg) + +![](images/253ed87f97d5bd866869740de70181822344ed1736d653dd6b6e2f7568de7895.jpg) + +# Abstract + +The Instruction Following (IF) ability measures how well Multi-modal Large Language Models (MLLMs) understand exactly what users are telling them and whether they are doing it right. Existing multimodal instruction following training data is scarce, the benchmarks are simple with atomic instructions, and the evaluation strategies are imprecise for tasks demanding exact output constraints. To address this, we present MM-IFEngine, an effective pipeline to generate high-quality image-instruction pairs. Our MM-IFEngine pipeline yields large-scale, diverse, and high-quality training data MM-IFInstruct-23k, which is suitable for Supervised Fine-Tuning (SFT) and extended as MM-IFDPO-23k for Direct Preference Optimization (DPO). 
We further introduce MM-IFEval, a challenging and diverse multi-modal instruction-following benchmark that includes (1) both compose-level constraints for output re + +sponses and perception-level constraints tied to the input images, and (2) a comprehensive evaluation pipeline incorporating both rule-based assessment and judge model. We conduct SFT and DPO experiments and demonstrate that fine-tuning MLLMs on MM-IFInstruct-23k and MM-IFDPO-23k achieves notable gains on various IF benchmarks, such as MM-IFEval $(+10.2\%)$ , MIA $(+7.6\%)$ , and IFEval $(+12.3\%)$ . We have fully open-sourced the datasets (both SFT and DPO), evaluation code and training scripts at https://github.com/SYuan03/MM-IFEngine. + +# 1. Introduction + +Instruction Following (IF) is a fundamental ability in Large Language Models (LLMs) [14, 27, 35, 53, 57] and Multimodal Large Language Models (MLLMs) [2, 34], which involves accurately interpreting and executing user-provided instructions. This ability is crucial for deploying models in real-world applications where users expect precise and context-aware responses, such as code + +generation [44], visual question answering [17], robots [38], and creative content creation [58]. For instance, in a VQA scenario, when a user asks an MLLM what is the object and how do I use it, return the object name and the usage instructions in a JSON format, accurate IF ensures the model provides a response like {object': 'hammer', 'usage': 'use it to drive nails'} instead of the plain text. + +Achieving precise IF in multimodal, diverse, and open-ended environments presents significant challenges for both model training and benchmark evaluation. One significant limitation is the scarcity of high-quality IF training data to train open-source MLLMs. In addition, current multimodal IF benchmarks [2, 34] merely have simple, atomic instructions, and the constraints are weakly correlated with visual content (see Fig. 1 (a)). 
Consequently, existing benchmarks lack the diversity required for real-world applications, leading to saturated results where nearly all models achieve over $80\%$ . Furthermore, the evaluation method in existing benchmarks often relies on LLM-as-a-judge [56], which is imprecise for instructions demanding exact output constraints, such as word counts. Therefore, the combination of limited training data, simple benchmarks, and imprecise evaluation strategy strongly restricts the progress of current MLLMs in IF. + +To address the lack of high-quality IF training data and challenging benchmarks, we propose MM-IFEngine, an effective pipeline for generating high-quality image-instruction pairs. MM-IFEngine collects diverse image sources, including natural scenes, UI interfaces, diagrams, charts, and mathematical problems. We then employ a structured approach using a predefined set of 16 task descriptions and 32 constraints to guide the LLM in crafting tailored instructions for each image. Using MM-IFEngine, we generated a comprehensive dataset of image-instruction pairs, collected responses from open-source MLLMs, and applied rigorous post-processing to retain only high-quality instruction-answer pairs, thus constructing MM-IFInstruct-23k for Supervised Fine-Tuning (SFT). We also generate negative responses by selectively removing constraints from the original data, constructing the preference dataset MM-IFDPO-23k for preference optimization algorithms such as Direct Preference Optimization (DPO) [36]. + +To facilitate the evaluation of multimodal IF, we present MM-IFEval, a benchmark comprising 400 challenging problems with diverse compose-level and perception-level instructions. MM-IFEval is derived from the images and instructions generated by MM-IFEngine with human-labeled annotations. As presented in Fig. 
1 (b), our MM-IFEval has the following three distinctive features: (1) Diverse Instruction Types: MM-IFEval has 32 distinct constraints, ensuring a wide range of instruction complexities and surpassing the scope of prior benchmarks. (2) Hybrid Evaluation: we use + +a hybrid strategy including both rule-based verification and judge model. For subjective instructions (e.g., mimicking tone), we design a comparative judgment for precise evaluation. Specifically, a control output is generated without the constraint, and the LLM judge compares both outputs for precise evaluation. (3) Challenging: the leading proprietary model (GPT-4o at $64.6\%$ ) and open-source model (Qwen2-VL-72B at $50.8\%$ ) demonstrating substantial room for improvement on our benchmark, highlights a significant opportunity for improvement in multimodal instruction following. + +We further demonstrate that fine-tuning MLLMs on either MM-IFInstruct-23k or MM-IFDPO-23k consistently boosts the performance of MLLMs on instruction following benchmarks, without compromising their original capabilities on other Visual Question Answering (VQA) benchmarks. Specifically, fine-tuning Qwen2-VL-7B on MM-IFDPO-23k with the DPO results in performance gains of $10.2\%$ , $7.6\%$ , and $12.3\%$ on MM-IFInstruct-23k, MIA-Bench [34], and IFEval [57], respectively. + +Our contributions include: (1) a MM-IFEngine pipeline for generating multimodal constraint-rich image-instruction pairs; (2) a large-scale training dataset MM-IFInstruct-23k and preference optimization dataset MM-IFDPO-23k derived from MM-IFEngine; (3) a challenging multimodal instruction following benchmark MM-IFEval with diverse constraints and comprehensive evaluation approaches; and (4) empirical evidence showing significant performance gains on both our MM-IFEval and existing benchmarks when training MLLMs on MM-IFInstruct-23k via SFT and MM-IFDPO-23k via DPO. + +# 2. Related Work + +Instruction Following in LLMs. 
Various benchmarks and training approaches have been proposed to make Large Language Models (LLMs) better align with human instructions. While existing Instruction Following (IF) benchmarks like [14, 35, 53, 57] all aim to evaluate instruction following, they differ significantly in their dataset construction pipelines, driven by their unique constraint taxonomies. CFBench [53], for instance, constructs its dataset using a combination of taxonomic and statistical methodologies to establish comprehensive constraints. This divergence extends to their evaluation strategies. For example, InFoBench [35] adopts a strategy of decomposing complex instructions into simpler assessment standards. Beyond benchmarks, various training approaches aim to enhance LLMs' instruction-following capabilities [29, 44], including in-context learning [58] and preference optimization [54]. However, he aforementioned research is limited to the text modality, whereas our work focuses on multi-modal instruction following with vision inputs. + +Instruction Following Benchmarks in MLLMs. Numerical + +![](images/779b73debc619571e7701ceb41cc0821a65f6b6bae44f37cd947132bc6dea8dd.jpg) +Figure 2. Overall pipeline of MM-IFEngine. Part (a) demonstrates the three-stage workflow of our engine: (1) Image filter; (2) Task generation using GPT-4o for images without QA pairs and instruct refinement for existing annotations; and (3) Constraints integration incorporating 6 main categories and 32 subcategories, ensuring compatibility between constraints and tasks. MM-IFEngine is employed to generate SFT and DPO training datasets and MM-IFEval benchmark, as shown in part (b) and (c). MM-IFEval implements three evaluation metrics combining rule-based verification functions and a judge model to ensure accurate assessment. 
+ +![](images/22a8743138bb9705cbfaa1f460aa340a3dd2f922340969a4a75e4547935fad07.jpg) + +![](images/e82b943d134af69e1fe089952dc36d51fc81bd96571d3c00377c3b8f701a9907.jpg) + +ous benchmarks [18] have been proposed to evaluate diverse capabilities of Multi-modal Large Language Models (MLLMs), including general knowledge [5, 24, 48, 50], document understanding [15, 25, 30], perception [43, 52], multi-image comprehension [26, 39, 40], and instruction following (IF) [2, 34]. MIA-Bench [34] and VisIT-Bench [2] are representative IF benchmarks that employ GPT-4 [32] for question generation and evaluation. In contrast to existing IF benchmarks, our MM-IFEval introduces significant improvements in diversity (32 constraint categories covering compositional and perceptual aspects), difficulty (averaging 5.1 constraints per question), and evaluation precision (using both judge models and rule-based verification). + +Instruction Tuning Data for MLLMs. Recent advancements in multi-modal instruction tuning data aim to improve cross-modal alignment and increase the variety of tasks handled by MLLMs [4, 8, 20, 26, 45, 46, 51]. For example, some previous works [3, 4, 23] build synthetic instruction tuning data generated using GPT-4V [33], enabling open-source MLLMs to achieve performance comparable to proprietary models across multiple benchmarks. However, existing instruction tuning data are mainly designed for general knowledge or visual perception, and data for + +improving the IF abilities is scarce. The scarcity of training data for enhancing IF abilities motivated the development of our MM-IFEngine pipeline. + +# 3. MM-IFEngine + +We employ the MM-IFEngine pipeline to generate image-instruction pairs, which are the foundation for creating instruction tuning data and our benchmark. As shown in Fig. 
2 (a), the pipeline is composed of three main stages: (1) image filtering, where we systematically select a diverse set of images from multiple sources to ensure broad coverage of visual content; (2) task generation, in which we either synthesize novel tasks tailored to the selected images or refine existing instruction templates to better align with the image content; and (3) constraint integration, where high-quality, constraint-aware instructions are generated for images that initially lack associated annotated guidance, thereby enhancing the richness and precision of the dataset. + +# 3.1. Image Filter + +Our image filtering strategy selects only high-quality images by removing those with low resolution or limited semantic + +richness. For unannotated pure image datasets (e.g., CC3M [37]), we prioritize natural scene images. Rich semantic content in these images enables the creation of more comprehensive and insightful QA pairs, which is crucial for designing diverse and complex instruction following tasks. We use the IC9600 and RAM metric proposed in the previous method [55] to select the images that have rich semantic content. + +Furthermore, we analyze existing annotated datasets, such as ALLaVA [3]. Our analysis reveals that some images suffer from low resolution, making them inadequate for the instruction-following task. Given our intention to design more intricate and varied instruction following tasks based on this data, we filter out data items containing low-quality images. + +# 3.2. Task Generation + +Image Source without Original QA Pairs. For image datasets lacking original annotated task instructions (e.g., CC3M [37]), we first design appropriate task instructions for the data items. We first develop a series of task instructions tailored to the data items. 
These instructions are crafted to elicit long-form responses that can be subsequently modified or refined using various constraints, for instance, Provide a detailed analysis of the image, including the setting, characters, and notable objects. The final task pool $\mathcal{P}_T$ comprises a total of 16 distinct tasks, with further details available in Appendix A.1.2. + +Given the task pool $\mathcal{P}_T$ , we randomly select $k$ tasks as examples of task types for each image $I$ . We then prompt a powerful language model $\mathcal{M}$ (e.g., GPT-4o) to generate an appropriate task list $T_l$ that aligns with the image content. The process is formulated as: + +$$ +\left\{T _ {l} ^ {*} \right\} = \mathcal {M} \left(I, T _ {e}\right) \tag {1} +$$ + +where $T_{e} = \{T_{1}, T_{2}, \ldots, T_{k}\}$ and each $T_{i} \in \mathcal{P}_{T}$ . The model $\mathcal{M}$ is tasked with either choosing relevant tasks from $T_{e}$ or supplementing reasonable tasks to construct the appropriate task list $T_{l}^{*}$ , ensuring that all tasks in $T_{l}^{*}$ are in line with the image content. After generating the $T_{l}^{*}$ , a sampling step is incorporated to guarantee task diversity. For each image, tasks are sampled. This sampling process is crucial as it enriches the variety of tasks associated with each image. + +Image Source with QA Pairs. In the case of image datasets that have QA pairs (e.g., ALLaVA [3]), we adopt certain strategies for processing the original question annotations. We choose ALLaVA as the primary dataset for this type of image source due to its rich and diverse image content, which is accompanied by a variety of task types. First, we conduct an analysis of the original question annotations. We find that some of the questions are accompanied by some few-shot examples. Additionally, some questions in ALLaVA have options in their original annotations, which are not + +suitable for our instruction-following task. 
Since we need to incorporate certain constraints into the original instructions in the subsequent steps, we use regular expressions and length limits to filter the questions in ALLaVA. Specifically, we select those questions that do not have few-shot examples associated with them. Mathematically, if we let $Q$ be the set of all questions in ALLaVA, $Q_{fs}$ be the subset of questions with few-shot examples, and $Q_{op}$ be the subset of questions with options. We aim to find the subset $Q_{s}$ of questions that satisfy the conditions: + +$$ +Q _ {s} = \left\{q \in Q | q \notin Q _ {f s} \wedge q \notin Q _ {o p} \right\} \tag {2} +$$ + +where the filtering based on the absence of few-shot examples and options is achieved using regular expressions and length limits. Then, we get the expected $T^{*}$ in our filter $Q_{s}$ set for the images. + +# 3.3. Constraints Integration + +Constraints Pool $(\mathcal{P}_C)$ We use instruction to refer to the entire textual input, which in our paper can generally be viewed as a composition of a task instruction and multiple constraints instruction. Tasks and constraints are rich and diverse, with a certain complexity in our work. All the constraints in our work can be further classified into six major categories, each with its own unique characteristics and applications: Text Length Requirements, Mathematical Requirements, Language & Formatting Requirements, Rhetoric & Logic Requirements, Action Requirements, and Keyword Requirements. Please refer to the Appendix Fig. 5 for more details of all the constraints. + +Given the constraints pool $\mathcal{P}_C$ and task instructions, a straightforward approach for composing full instruction is to first set several constraints for each constraint type and then randomly select one constraint from some of the types to compose the constraint list, and finally concatenate the constraint list with the task instruction to form the full instruction. 
But this direct method has two problems: (1) The constraints are not diverse enough, which may not be able to fully evaluate the ability of the model. (2) The contradiction between the constraints and also between the constraints and the task instruction may exist. For the first problem, an LLM is employed to generate concrete content of constraint instruction for the specific constraint type in our method. In order to avoid the generated content being too divergent or hard to control its difficulty, we carefully design some cases or requirements of details that needed to be paid attention to when generating the content for each constraint type (Appendix A.1.1). For the second problem, we also use a powerful LLM to help keep the correlation of constraints with its instruction and filter out those that cause total contradiction. Finally, we prompt an LLM to check whether the constraints and the task instruction are compatible and filter out those failing to pass the check. Our method not only + +ensures the compatibility of constraints and instructions but also enriches the diversity of constraints. + +In our actual practice process, we find that although we prompt the LLM to select appropriate constraints that should be compatible with the task instruction and other constraints, the generated constraints still have some contradiction with the task instruction, especially on those existing datasets with various kinds of annotations. The reason is that these datasets are designed for overall question-answering tasks, and the question(or named task instruction) tends to be contradictory with the constraints, which are mostly compatible with those tasks of creating or answering in non-short form. So, we decouple the selection and generation steps for this type of data source. 
Specifically, we first select the constraints from the constraints pool $\mathcal{P}_C$ and then provide the selected mostly compatible constraints to the LLM to select secondly and generate final constraints. But for image datasets without original QA pairs, in other words, for which we generate task instructions for them using $\mathcal{P}_T$ , we directly sample k constraint types for the LLM to generate concrete content because they are mostly compatible with the pre-designed task instruction. The uniform process is formulated as: + +$$ +C _ {l} ^ {*} = \mathcal {L} \left(C _ {s}, T ^ {*}\right), C _ {f} ^ {*} = \mathcal {V} \left(C _ {l} ^ {*}, T ^ {*}\right) \tag {3} +$$ + +where $\mathcal{T}^*$ is the task applicable to the image. The model $\mathcal{L}$ is tasked with both choosing appropriate constraint types from $C_s$ again and generating concrete constraints for some of them, whose output is a list of concrete constraint descriptions. To ensure that the generated constraints remain compatible with the given task instruction $T^*$ , we employ a final validation step using another LLM process, denoted as $\mathcal{V}$ . This validation function checks whether each constraint in $C_l^*$ aligns with $T^*$ and filters out those that contradict or do not fit the task instruction. The resulting set of fully verified and compatible constraints is represented as $C_f^*$ . + +MM-IFInstruct-23k Construction. By applying the MM-IFEngine pipeline, we construct the MM-IFInstruct-23k dataset, which contains 23k high-quality multi-modal instruction-following training data. We first take an analysis of the performance of the current open-source MLLMs and proprietary MLLMs on several benchmarks [25, 34], and find that for instruction-following capability, the most powerful open-source MLLM like InternVL2.5-78B-MPO [42] is nearly equivalent to GPT-4o, and the performance on general VQA benchmarks are even higher than GPT-4o. 
Thus, we use InternVL2.5-78B-MPO to generate responses for our MM-IFInstruct-23k dataset. Despite its capabilities, the InternVL2.5-78B-MPO model encounters difficulties in ensuring $100\%$ compliance with our constraints, a challenge attributed to the complexity, number, and comprehensiveness. Consequently, we implement a post-processing stage to filter out responses that do not meet the specified criteria. Acknowledging that achieving perfect constraint adherence + +might be challenging even for human annotators on this task, we set a practical accuracy threshold of $80\%$ . Finally, our MM-IFInstruct-23k comprises 23k data items, with 16k constructed from the training set of CC3M, 6k from ALLaVA, and 4k from the training set of MultiUI, Geo170k[12] and ChartQA[31]. We show the distribution of constraints number of MM-IFInstruct-23k in Fig. 3. + +MM-IFDPO-23k Construction. To comprehensively explore and make full use of our high-quality data, we also utilize MM-IFEngine to construct MM-IFDPO-23k, a preference dataset comprising chosen and rejected samples suitable for Direct Preference Optimization (DPO) [36]. Our high-quality data can be directly employed as the chosen samples. Regarding rejected samples, we opt to utilize Qwen2-VL-7B-Instruct to answer the variant of the question for generating rejected pairs. Specifically, we have four distinct settings for generating negative pairs, which mainly differ in the input to Qwen2-VL-7B-Instruct. These settings include (1) With image, but randomly remove one-third of the number of constraints in the prompt; (2) With image, but randomly remove two-thirds of the number of constraints in the prompt; (3) With image, but randomly remove all the constraints in the prompt; and (4) Full prompt, but without the image; We use these four types of input to feed into Qwen2-VL-7B-Instruct model, and collect the rejected responses to construct the MM-IFDPO-23k. + +# 4. 
MM-IFEval + +Existing benchmarks for multi-modal instruction following are scarce. The majority focus on simple and atomic instructions, resulting in performance saturation across models. To address this limitation, we introduce MM-IFEval, a human-annotated, comprehensive, and challenging benchmark designed for evaluating multi-modal IF. + +# 4.1. MM-IFEval Construction + +To construct the MM-IFEval, we first use our MM-IFEngine to generate the question-answer (QA) pairs for images. The generated instructions may inherently contain potential conflicts. Consequently, human annotation remains critical for constructing this benchmark, as human annotators possess the cognitive capacity for comprehensive assessment of these complex situations. After the human annotation, we further use an extra post-processing step that prompts the LLMs to double-check and mitigate the occurrence of constraint conflicts as much as possible. Finally, we construct the MM-IFEval bench of 400 questions, 300 of which are compose-level open-ended questions and 100 perception-level questions with ground truth. + +Diverse Constraints. With 32 distinct constraint categories and an average of 5.1 constraints per question, MM-IFEval presents a more challenging evaluation task compared to earlier benchmarks (e.g., [34], which has 8 categories and 2.6 + +![](images/e90dbd5a52e6d099a2eb2d93609c0aa8ceee4aed0b36fcf9264cf178892f3f49.jpg) +Figure 3. Constraint Quantity Distribution in MM-IFInstruct-23k. Our MM-IFInstruct-23k exhibits systematic variation in constraint complexity, with each sample containing 3-12 constraints per instruction. + +average constraints per question). Furthermore, our benchmark incorporates essential constraints such as "Output in JSON format", which is prevalent and practical in real-world scenarios, a feature not found in previous multi-modal instruction following benchmarks. + +Compose-level and Perception-level Questions. 
compose-level questions involve textual constraints, while perception-level questions require greater visual perception ability to solve. The perception-level questions incorporate a variety of image sources, such as natural scenes, user interfaces, diagrams, table charts, and mathematical expressions, which we believe are representative of real-world applications. Please refer to the Appendix for examples of compose-level and perception-level questions. + +# 4.2. Hybrid Evaluation + +Current multi-modal instruction following benchmarks often rely solely on GPT-4o for evaluation. However, accurately assessing certain constraints, such as numerical conditions (e.g., 'output in 200 words', 'Answer in 5 paragraphs', 'Use the word 'cat' in the answer twice'), remains challenging even for GPT-4o. In contrast, verifiable functions like string matching offer greater precision than judge models for such constraints. To address this, we propose a hybrid evaluation strategy (see Fig. 2(c)) that employs three methods, including both rule-based Verification and judge models for more robust and precise evaluation. + +(1) Rule-based Verification. For constraints that adhere to a fixed format and involve specific content that can be objectively verified—yet remain challenging for an LLM to assess accurately—we employ a rule-based approach. Specifically, we design a set of predefined functions for different con + +![](images/514e423050fb0272612f0f222c607457aa6658a3c1d4f4cee5e2a32743f32099.jpg) +Figure 4. Constraint Category Distribution inCompose-Level Problems of MM-IFEval. This part comprises six primary constraint categories with 32 subcategories, forming a multi-level taxonomy for instruction-following evaluation. + +strand types. The LLM is first prompted to extract the relevant parameters, denoted as Params, from the constraint description. 
When evaluating a constraint that falls within the scope of our rule-based framework, we use Params and the model's output as inputs to the predefined function to determine compliance. + +(2) LLM-based Direct Judgment. This method is primarily used for evaluating constraints that can be easily and unambiguously verified based on the model's output. It is applicable to constraints where correctness is straightforward to determine, such as those requiring the inclusion of specific words or phrases. For instance, a constraint like "Use the word 'inspiration' or its synonyms at least twice in the response" does not follow a strict format and cannot be assessed using a rule-based approach. Instead, we directly leverage an LLM to determine whether the constraint is satisfied. + +(3) LLM-based Comparative Judgment. Some constraints, particularly those related to tone, style, or role-playing, are difficult to evaluate directly. To improve judgment accuracy, we adopt a comparative approach. Specifically, we generate a second model output using a nearly identical prompt but without the constraint under evaluation. The LLM-based evaluator is then provided with both outputs and asked to compare them, determining whether the model's response with the constraint in the prompt adheres more closely to the expected requirement. + +# 5. Experiments + +Benchmarks. We select the following benchmarks to demonstrate that models fine-tuned on MM-IFInstruct-23k and MM-IFDPO-23k enhance instruction following without compromising performance on other VQA tasks: (1) + +Table 1. Main results on Instruction Following benchmarks, including our proposed MM-IFEval, MIA-Bench [34], and IFEval [57]. The symbol ${}^{\mathrm{M}}$ refers to multimodal benchmarks,and ${}^{\mathrm{T}}$ denotes text-only benchmarks. 
We report both compose-level ("C") and perception-level ("P") results for MM-IFEval, prompt-level accuracy ("Prompt.") and instruction-level accuracy ("Inst.") for IFEval, and the averaged results across all three benchmarks in the rightmost column.
ModelParameterMM-IFEvalM(ours)MIA MIFTAvg.
CPAvg.Prompt.Inst.Avg.
LLaVA-NeXT-7B [21]7B36.816.031.673.232.043.337.747.5
LLaVA-OneVision-Qwen2-7B-OV [16]8B37.424.034.084.543.354.849.055.8
InternVL2-8B [7]8B45.232.041.986.244.657.050.859.6
InternVL2.5-8B [6]8B49.636.046.288.552.262.457.364.0
LLaVA-NeXT-Llama3-8B [21]8B45.921.039.783.345.056.450.757.9
w. MM-IFInstruct-23k-59.319.049.2 +9.586.5 +3.250.861.856.3 +5.664.0 +6.1
w. MM-IFDPO-23k-58.721.049.3 +9.690.0 +6.764.573.769.1 +18.469.5 +11.6
Qwen2-VL-7B-Instruct [41]8B42.740.042.080.542.452.547.456.6
w. MM-IFInstruct-23k-57.038.052.3 +10.387.7 +7.246.858.452.6 +5.264.2 +7.6
w. MM-IFDPO-23k-55.243.052.2 +10.288.1 +7.655.264.359.7 +12.366.7 +10.1
+ +Table 2. Main results on VQA benchmarks, including general knowledge (MMMU [50], MMBench [24], MMStar [5], MMT-Bench [48]), document understanding (AI2D [15], OCRBench [25]), Chat (MMVet [49]) and Hallusion (POPE [19]). Fine-tuning models on MM-IFDPO-23k achieve comparable performance across these benchmarks. + +
ModelGeneralDocumentChatHallusion
MMMUvalMMBenchdevMMStarMMT-BenchvalAI2DOCRBenchMM VetPOPEAvg.
LLaVA-NeXT-Llama3-8B [21]43.772.543.653.173.155.043.387.258.9
w. MM-IFInstruct-23k45.869.344.253.371.255.346.388.859.3
w. MM-IFDPO-23k44.172.143.753.172.356.743.986.859.1
Qwen2-VL-7B-Instruct [41]53.981.060.863.282.986.763.386.372.3
w. MM-IFInstruct-23k54.079.357.161.081.681.861.689.270.7
w. MM-IFDPO-23k54.081.358.563.783.386.866.185.772.4
+ +Instruction Following benchmarks, including MIA-Bench [34], IFEval [57], and our proposed MM-IFEval. To be noted, IFEval is a language-only benchmark while others are both multi-modal benchmarks. (2) VQA Benchmarks, including MMMU [50], MMBench [24], MMStar [5], AI2D [15], OCRBench [25], MMVet [49], POPE [19] and MMT-Bench [48]. + +Implementation Details. We conducted SFT and DPO fine-tuning experiments on two representative MLLMs: Qwen2-VL-7B-Instruct [41] and LLaVA-Next-Llama3-8B [21], using our custom datasets MM-IFInstruct-23k for supervised fine-tuning (SFT) and MM-IFDPO-23k for direct preference optimization (DPO). For the SFT phase, we used a batch size of 128 and a learning rate of 1e-5. For the DPO phase, we used a learning rate of 5e-7 with the batch size of 16. We implemented our training pipeline with the help of LLaMAFactory and evaluation pipeline under VLMEvalkit [10]. + +# 5.1. Results about MM-IFInstruct-23k and MM-IFDPO-23k + +Consistently Improvements on Instruction Following Benchmarks. As shown in Tab. 1, both MM-IFInstruct-23k and MM-IFDPO-23k significantly enhance the model's performance in instruction following benchmarks. Finetuning LLaVA-Next and Qwen2-VL on MM-IFInstruct-23k + +yielded significant averaging performance gains of $6.1\%$ and $7.6\%$ points, respectively. Furthermore, applying DPO with MM-IFDPO-23k also led to notable improvements for LLaVA-Next and Qwen2-VL, with average gains of $11.6\%$ and $10.1\%$ points. Such improvements demonstrate the effectiveness of MM-IFEngine in constructing high-quality training data. + +Comparable Results on VQA Benchmarks. To show that fine-tuning on MM-IFInstruct-23k and MM-IFDPO-23k improves instruction following without degrading performance on other VQA tasks, we analyzed model performance on other widely used benchmarks, as detailed in Tab. 2. Results indicate that models fine-tuning with MM-IFInstruct-23k and MM-IFDPO-23k demonstrate comparable performance across these benchmarks. 
+ +SFT vs DPO. As evidenced by Tab. 1 and Tab. 2, DPO using MM-IFDPO-23k significantly surpasses SFT on MM-IFInstruct-23k. This is likely due to negative samples of DPO, which are essential for training models to respect constraints, particularly in our data with multiple and diverse constraints. Additionally, the Kullback-Leibler (KL) divergence in DPO preserves the model's generalization, as demonstrated in Tab. 2. + +Table 3. Evaluation of various MLLMs on MM-IFEval. We report the accuracy of easy and difficult problems and the average accuracy across all problems. The C-Level and P-Level refer to the compose-level and perception-level problems, respectively. The best performance in each section is highlighted in bold. + +
ModelParamC-LevelP-LevelAvg.
Proprietary MLLMs
Claude-3.5V-Sonnet [1]-67.544.061.7
GPT-4o-mini [13]-70.440.062.8
GPT-4o (20240806) [13]-71.544.064.6
Open-Source MLLMs
LLaVA-NeXT-7B [21]7B36.816.031.6
LLaVA-OneVision-Qwen2-7b-OV [16]8B37.424.034.0
MiniCPM-V-2.6 [47]8B39.232.037.4
InternVL2-8B [7]8B45.232.041.9
InternVL2-40B [7]40B48.036.045.0
InternVL2.5-8B [6]8B49.636.046.2
InternVL2.5-26B [6]26B53.532.048.1
Qwen2-VL-72B-Instruct [41]72B53.443.050.8
LLaVA-NeXT-Llama3-8B [21]8B45.921.039.7
+ MM-IFDPO-23k-58.721.049.3
Qwen2-VL-7B-Instruct [41]8B42.740.042.0
+ MM-IFDPO-23k-55.243.052.2
+ +# 5.2. Leaderboard of MM-IFEval + +We present the performance comparison results of various MLLMs on our MM-IFEval in Tab. 3, including both proprietary MLLMs such as GPT-4o [13] and Claude-3.5 [1] and open-source MLLMs such as LLaVA-Next [21], LLaVA-OneVision [16], InternVL [6, 7], and Qwen2-VL [41]. + +MM-IFEval is Challenging. Results on Tab. 3 demonstrate that multimodal instruction following is still a challenging and unsolved task for current MLLMs, specifically for the perception-level problems. The propriety models GPT-4o and Claude-3.5V-Sonnet establish top-tier average performance with scores of 64.6 and 61.7, respectively. The leading open-source MLLM, Qwen2-VL-72B merely achieves an overall accuracy of 50.8. We attribute the performance gap between proprietary and open-source models to the scarcity of high-quality open-source training data for instruction following. As a result of our MM-IFDPO-23k, Qwen2-VL-7B fine-tuned via our optimized DPO approach achieves a score of 52.2, demonstrating a $24.3\%$ relative improvement over its baseline (42.0), and even surpasses the larger Qwen2VL-72B model. We hope our MM-IFEval benchmark motivates further exploration into improving MLLM instruction-following. + +Benchmark Examples. Please refer to the Appendix for visual examples of MM-IFEval, including images and instructions with constraints for both compose-level and perception-level problems. + +# 5.3. Ablation Studies + +Ablation Studies on Different DPO Settings. In Tab. 4, we present an ablation study on various strategies for con + +Table 4. Ablation studies across different DPO settings, including randomly deleting constraints (second row to fourth row) or prompting MLLMs without images (bottom row) to generate negative responses. Avg. refers to the average score of three IF benchmarks. + +
ModelMM-IFEvalMIAIFEvalAvg.
Qwen2-VL-7B-Instruct42.080.547.456.6
+ DPO (-33% cons)51.588.257.965.8
+ DPO (-66% cons)51.288.058.465.9
+ DPO (-100% cons)52.288.159.766.7
+ DPO (w/o img)48.486.954.763.4
LLaVA-NeXT-Llama3-8B39.783.350.757.9
+ DPO (-33% cons)50.487.264.367.3
+ DPO (-66% cons)48.786.869.768.4
+ DPO (-100% cons)49.390.069.169.5
+ DPO (w/o img)44.785.964.865.2
structing pairwise preference data
We hope this work inspires further research into improving the + +instruction-following ability of Multimodal Large Language Models, a critical step towards realizing their potential in diverse and impactful applications. + +# References + +[1] Anthropic. Claude 3.5 sonnet. 2024. 8 +[2] Yonatan Bitton, Hritik Bansal, Jack Hessel, Rulin Shao, Wanrong Zhu, Anas Awadalla, Josh Gardner, Rohan Taori, and Ludwig Schmidt. VisIT-Bench: A benchmark for vision-language instruction following inspired by real-world use. In NeurIPS, Datasets and Benchmarks, 2023. 1, 2, 3 +[3] Guiming Hardy Chen, Shunian Chen, Ruifei Zhang, Junying Chen, Xiangbo Wu, Zhiyi Zhang, Zhihong Chen, Jianquan Li, Xiang Wan, and Benyou Wang. Allava: Harnessing gpt4v-synthesized data for lite vision-language models. arXiv preprint arXiv:2402.11684, 2024. 3, 4, 2 +[4] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions, 2023. 3 +[5] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? In NeurIPS, 2024. 3, 7 +[6] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. 7, 8 +[7] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 7, 8 +[8] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. 
InstructBLIP: Towards general-purpose
LLaVA-OneVision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 7, 8 +[17] Huayang Li, Siheng Li, Deng Cai, Longyue Wang, Lemao Liu, Taro Watanabe, Yujiu Yang, and Shuming Shi. TextBind: Multi-turn interleaved multimodal instruction-following in the wild. In ACL Findings, 2024. 2 +[18] Jian Li, Weiheng Lu, Hao Fei, Meng Luo, Ming Dai, Min Xia, Yizhang Jin, Zhenye Gan, Ding Qi, Chaoyou Fu, et al. A survey on benchmarks of multimodal large language models. arXiv preprint arXiv:2408.08632, 2024. 3 +[19] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models, 2023. 7 +[20] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv preprint arXiv:2304.08485, 2023. 3 +[21] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR,and world knowledge,2024.7,8 +[22] Junpeng Liu, Tianyue Ou, Yifan Song, Yuxiao Qu, Wai Lam, Chenyan Xiong, Wenhu Chen, Graham Neubig, and Xiang Yue. Harnessing webpage uis for text-rich visual understanding, 2024. 2 +[23] Yangzhou Liu, Yue Cao, Zhangwei Gao, Weiyun Wang, Zhe Chen, Wenhai Wang, Hao Tian, Lewei Lu, Xizhou Zhu, Tong Lu, Yu Qiao, and Jifeng Dai. Mminstruct: a high-quality multi-modal instruction tuning dataset with extensive diversity. Science China Information Sciences, 67(12), 2024. 3 +[24] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. MMBench: Is your multi-modal model an all-around player? In ECCV, 2024. 3, 7 +[25] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. OCRBench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 2024. 
3, 5, 7 +[26] Ziyu Liu, Tao Chu, Yuhang Zang, Xilin Wei, Xiaoyi Dong, Pan Zhang, Zijian Liang, Yuanjun Xiong, Yu Qiao, Dahua + +Lin, et al. MMDU: A multi-turn multi-image dialog understanding benchmark and instruction-tuning dataset for lvlms. In NeurIPS Datasets and Benchmarks Track, 2024. 3 +[27] Renze Lou, Kai Zhang, and Wenpeng Yin. A comprehensive survey on instruction following. arXiv preprint arXiv:2303.10475, 2023. 1 +[28] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024. 3 +[29] Ziyang Luo, Can Xu, Pu Zhao, Qingfeng Sun, Xiubo Geng, Wenxiang Hu, Chongyang Tao, Jing Ma, Qingwei Lin, and Daxin Jiang. Wizardcoder: Empowering code large language models with evol-instruct. arXiv preprint arXiv:2306.08568, 2023. 2 +[30] Yubo Ma, Yuhang Zang, Liangyu Chen, Meiqi Chen, Yizhu Jiao, Xinze Li, Xinyuan Lu, Ziyu Liu, Yan Ma, Xiaoyi Dong, et al. MMLongBench-Doc: Benchmarking long-context document understanding with visualizations. In NeurlPS Datasets and Benchmarks Track, 2024. 3 +[31] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 5, 2 +[32] OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023. Accessed: 2025-02-23. 3 +[33] OpenAI. GPT-4V(ison) System Card. 2023. Accessed: 2025-02-23. 3 +[34] Yusu Qian, Hanrong Ye, Jean-Philippe Fauconnier, Peter Grasch, Yinfei Yang, and Zhe Gan. MIA-Bench: Towards better instruction following evaluation of multimodal llms. In ICLR, 2025. 1, 2, 3, 5, 7 +[35] Yiwei Qin, Kaiqiang Song, Yebowen Hu, Wenlin Yao, Sangwoo Cho, Xiaoyang Wang, Xuansheng Wu, Fei Liu, Pengfei Liu, and Dong Yu. InFoBench: Evaluating instruction following ability in large language models. arXiv preprint arXiv:2401.03601, 2024. 
1, 2 +[36] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 2, 5 +[37] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, 2018. 4, 2 +[38] Lucy Xiaoyang Shi, Brian Ichter, Michael Equi, Liyiming Ke, Karl Pertsch, Quan Vuong, James Tanner, Anna Walling, Haohuan Wang, Niccolo Fusai, et al. Hi Robot: Open-ended instruction following with hierarchical vision-language-action models. arXiv preprint arXiv:2502.19417, 2025. 2 +[39] Dingjie Song, Shunian Chen, Guiming Hardy Chen, Fei Yu, Xiang Wan, and Benyou Wang. Milebench: Benchmarking mllms in long context, 2024. 3 +[40] Fei Wang, Xingyu Fu, James Y. Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, + +Kai Zhang, Tianyi Lorena Yan, Wenjie Jacky Mo, Hsiang-Hui Liu, Pan Lu, Chunyuan Li, Chaowei Xiao, Kai-Wei Chang, Dan Roth, Sheng Zhang, Hoifung Poon, and Muhao Chen. Muirbench: A comprehensive benchmark for robust multi-image understanding, 2024. 3 +[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-VL: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7, 8 +[42] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, et al. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. 
5 +[43] Xilin Wei, Xiaoran Liu, Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Jian Tong, Haodong Duan, Qipeng Guo, Jiaqi Wang, et al. Videorope: What makes for good video rotary position embedding? arXiv preprint arXiv:2502.05173, 2025. 3 +[44] Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, and Daxin Jiang. Wizardlm: Empowering large language models to follow complex instructions. arXiv preprint arXiv:2304.12244, 2023. 2 +[45] Zhiyang Xu, Ying Shen, and Lifu Huang. Multiinstruct: Improving multi-modal zero-shot learning via instruction tuning, 2023. 3 +[46] Zhiyang Xu, Chao Feng, Rulin Shao, Trevor Ashby, Ying Shen, Di Jin, Yu Cheng, Qifan Wang, and Lifu Huang. Visionplan: Scaling human-labeled tasks in visual instruction tuning, 2024. 3 +[47] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. MiniCPM-V: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 8 +[48] Kaining Ying, Fanqing Meng, Jin Wang, Zhiqian Li, Han Lin, Yue Yang, Hao Zhang, Wenbo Zhang, Yuqi Lin, Shuo Liu, Jiayi Lei, Quanfeng Lu, Runjian Chen, Peng Xu, Renrui Zhang, Haozhe Zhang, Peng Gao, Yali Wang, Yu Qiao, Ping Luo, Kaipeng Zhang, and Wenqi Shao. Mmt-bench: A comprehensive multimodal benchmark for evaluating large vision-language models towards multitask agi, 2024. 3, 7 +[49] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. MM-Vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 7 +[50] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expertagi. 
In CVPR, 2024.3,7 +[51] Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Ziyu Liu, Shengyuan Ding, Shenxi Wu, Yubo Ma, Haodong Duan, Wenwei Zhang, et al. Internlm-xcomposer2. 5-reward: A simple yet effective multi-modal reward model. arXiv preprint arXiv:2501.12368, 2025. 3 +[52] Yuhang Zang, Wei Li, Jun Han, Kaiyang Zhou, and + +Chen Change Loy. Contextual object detection with multimodal large language models. IJCV, 2025. 3 +[53] Tao Zhang, Yanjun Shen, Wenjing Luo, Yan Zhang, Hao Liang, Fan Yang, Mingan Lin, Yujing Qiao, Weipeng Chen, Bin Cui, et al. CFBench: A comprehensive constraints-following benchmark for llms. arXiv preprint arXiv:2408.01122, 2024. 1, 2 +[54] Xinghua Zhang, Haiyang Yu, Cheng Fu, Fei Huang, and Yongbin Li. Iopo: Empowering llms with complex instruction following via input-output preference optimization, 2024. 2 +[55] Xiangyu Zhao, Shengyuan Ding, Zicheng Zhang, Haian Huang, Maosong Cao, Weiyun Wang, Jiaqi Wang, Xinyu Fang, Wenhai Wang, Guangtao Zhai, et al. Omnialign-v: Towards enhanced alignment of mllms with human preference. arXiv preprint arXiv:2502.18411, 2025. 4 +[56] Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. In NeurIPS Datasets and Benchmarks Track, 2023. 2 +[57] Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. Instruction-following evaluation for large language models. arXiv preprint arXiv:2311.07911, 2023. 1, 2, 7 +[58] Wangchunshu Zhou, Yuchen Eleanor Jiang, Ethan Wilcox, Ryan Cotterell, and Mrinmaya Sachan. Controlled text generation with natural language instructions. In ICML, 2023. 2 + +# MM-IFEngine: Towards Multimodal Instruction Following Supplementary Material + +# A. MM-IFEval + +# A.1. An overview of Constraints and Instructions + +# A.1.1. 
Constraints + +Based on daily use cases and existing research, we have identified six main categories of constraints, which can be further divided into 32 specific constraint types shown in Fig. 5. In this section, we introduce and exemplify these six major constraint categories. For detailed descriptions and examples of all 32 subcategories, please refer to Table 5. + +Text Length Requirements. In this category, we focus on the length of the response, including the number of paragraphs, sentences, and words. We also consider the length of the response in the aspect of poetry or "Use yes or no to answer the question". It must be noted that we do not require the model to follow the strict requirement in exact numbers like "The response must be exactly 56 words". The constraints we propose in this category are based on reality, with precise numerical requirements only at the sentence or paragraph level, and of moderate size; the rest of the constraints are used to limit by ranges like "The response must be between 100 and 150 words", which aligns with the task that people tend to encounter in real-world scenarios. + +Mathematical Requirements. This category includes constraints related to the most common part of answering mathematical problems like precision, scientific notation, and other mathematical requirements. For example, "Keep two decimal places for the number in the answer", "Please round up all the numbers in the answer", or "Don't include specific numbers in your answers. Compare numbers with their relative sizes". + +Language & Formatting Requirements. This category includes constraints related to the language and formatting of the response, such as answering in a specific language, using a specific format like JSON, or using a specific style like poetry. Requirements for tense, writing style, numbering, list, and other language-related or formatting-related aspects are also included in this category. + +Rhetoric & Logic Requirements. 
"Rhetoric" refers to the art of using language to persuade or influence, while "Logic" refers to the principles of reasoning and argumentation. This category includes constraints related to the rhetoric and logic of the response, such as the use of metaphor, simile, cause-and-effect relationship, conditional statement, and other rhetoric and logic-related aspects. + +Action Requirements. "Action" refers to the action that the model should take like a human. We define this category as the constraints that require the model to perform a specific + +action, such as tone, role imitation, use specific prefix or suffix, or acting as if under some specific situation. We hope this category can help us to evaluate the ability of the model to follow instructions and perform actions in more complex and realistic scenarios. + +Keyword Requirements. "Keyword" refers to the specific words or phrases that the model should include or avoid in the response. This category includes constraints related to the response keyword, such as the use of specific keywords, the avoidance of specific keywords, or the variation of specific keywords. For example, "Use at least three synonyms for 'innovation,' such as 'breakthrough,' 'new approach,' or 'invention,' spread throughout your text." + +# A.1.2. Instruction Tasks + +For source datasets lacking original task instructions, we constructed a diverse task pool containing 18 instructions that encourage open-ended responses from models. These instructions can be categorized into five task types: Descriptive Analysis, Emotional & Perspective, Creative Writing, Social Media & Content, and Roleplay. The classification information and examples of the instructions are shown in Table 6. + +# A.2. Perception-level Problems + +![](images/26f2af004dd7f5937db26a4a9177826d59f32c69e865ec08b9eeb5358d9181b2.jpg) +Figure 6. 
Image Source Distribution in perception-level problems. Perception-level problems in MM-IFEval present a systematic categorization of 100 challenging vision-based instruction-following tasks, organized into 13 distinct classes according to image content characteristics and task complexity. + +Perception-level problems in MM-IFEval comprise 100 carefully crafted questions with strong image-constraint correlations. The images can be categorized into 13 information-rich and complex domains shown in Figure 6. Figures 10, 11, 12, and 13 present representative examples from the web interface, diagram, poster, and visual difference categories, respectively, demonstrating the diverse visual challenges incorporated in our benchmark. + +![](images/6e3680bdcc5cb2321579d781eb8c241e9d5302be72393bbcf07887d1d980df2c.jpg) +Figure 5. Demonstration of constraints categories. We designed 6 main categories for all the constraints used, with a total of 32 subcategories. + +# B. Image Sources + +The quality of the image source is crucial for the performance of the model. Besides this, the diversity of the image source is also important to fully utilize or evaluate the ability of the model. We use the following image sources: + +- Natural Scene: The natural scene is the most common image source, which is most used in the real-world like the image of a beautiful landscape, a busy street, or a crowded cafe. In this part, we sample images from CC3M[37] and ALLaVA[3]. +- UI Interface: The UI interface is the image from the UI interface of the website and mobile application. It is crucial because it represents a significant portion of real-world multimodal interactions where users need to understand and interact with digital interfaces. We collected diverse mobile app UI images from the RICO[9] dataset and web UI images from the MultiUI[22] dataset. 
+- Diagram & Chart: The diagram and chart are the image that contains some specific information like the data, the relationship between the data, or the change of the data. We collect diagram and chart images from ChartQA[31] dataset, which contains diverse diagram and chart images. +- **Mathematic:** The math problem is the image that contains a math problem, which is a common task in the real-world like the problem of the math, the solution of the math problem, or the calculation of the math problem. We collect math problem images from Geo170k[12] dataset, which contains diverse geometry problem images. + +# C. MM-IFEngine Prompt Template + +MM-IFEngine provides a scalable pipeline for mass-producing instruction-following datasets for multimodal large language models, functioning effectively regardless of whether source datasets contain original instructions. This engine enables systematic augmentation of existing visual datasets with diverse instruction-following tasks. Figures 14 and 15 demonstrate representative prompt templates from + +MM-IFEngine's two core components: the instruction generation module and the constraint integration module, respectively, illustrating the methodology behind our automated data construction process. + +# D. MM-IFInstruct and MM-IFDPO Dataset + +Our MM-IFInstruct dataset integrates three distinct data sources: CC3M (without original instructions), ALLaVA (with pre-existing questions), and a diversity collection composed of MultiUI, ChartQA, and Geo170k. To create the MM-IFDPO dataset for preference optimization, we randomly removed $33\%$ of constraints from the MM-IFInstruct samples to generate rejected examples. Figures 16, 17, and 18 illustrate representative samples derived from CC3M, ALLaVA, and our diversity collection, respectively, while Figure 19 demonstrates an example pair from the MM-IFDPO dataset showing both preferred and rejected instructions. + +# E. Evaluation + +# E.1. 
Rule-based + +We identified 10 constraint subcategories from our taxonomy of 32 that could be algorithmically verified. For these selected constraints, we developed specialized verification functions with targeted parameters. For efficiency, we employed large language models to analyze each constraint specification, select the most appropriate verification function, and extract the necessary parameters. All selections were subsequently validated through manual review to ensure the accuracy and quality of both the function selection and their parameters. The prompt template used for function selection and parameter extraction is illustrated in Figure 20, while Table 7 provides a comprehensive overview of all verification functions with their corresponding parameter examples. + +# E.2. Compare Judge Method + +Recent works[11, 28] have shown that GPT-4o has the ability to compare two responses from models. For constraint types lacking objective evaluation metrics (such as tone requirements or role imitation), we implemented a comparative assessment method. This approach requires the model under evaluation to generate two responses: one adhering to the target constraint and another without the constraint. A judge model then analyzes both outputs to determine whether significant differences exist between them, thereby more accurately assessing whether the model has successfully followed these subjective constraints. Figure 21 illustrates the prompt used in this comparative evaluation process. + +# E.3. Direct Judge Method + +The Direct Judge method provides the constraint and answer of the model under test directly to the Judge model, and its prompt template is shown in Figure 22. + +![](images/da1c12581fdf4703106d135a0c117b8491e0a5df760368394ef31e1010533d9d.jpg) + +![](images/0484a0868ae93c6822a2e37ad27487a45d7da1c18d389d48bfe3030a757e0da2.jpg) + +# Instruction + +What might have led to the dog's behavior as depicted in this image? 
![](images/f85c35d98213bdd15cfb04c35d6896020e317749ab4d75ff2de2cf0f5000a581.jpg) + +# Constraints + +1.target Audience requirement: Your audience is a dog lover. +2. tense Requirement: Use present tense in the first paragraph and past tense in the second. +3.tone Requirement: Adopt a reassuring, empathetic tone as if consoling someone. +4.paragraph_number_limit: Your response must consist of exactly 3 paragraphs. +5.mention: Mention the term 'sorry' at least twice throughout your description. +6. highlight Requirement: Use bold for the first occurrence of the term 'aggressive behavior' in each paragraph. +7. wrap_up Requirement: Provide a final paragraph summarizing the key arguments. +8. perspective Requirement: Please answer the question in the second person. + +![](images/5620c0d5370927e54a310681de1128ea2aee9ec77460e5d08a808103fb653489.jpg) +Figure 8. A compose-level problem example from the MM-IFEval benchmark in the chart image category. + +![](images/907f08c10d6e79f5c38cd4e49277e1c617858900c1802711181bddafab3df592.jpg) +Figure 7. A compose-level problem example from the MM-IFEval benchmark in the general image category. + +# Instruction + +Which region has the highest value of apple production? Give the answer, and analyze the reasons for the large yield of apples in this area. + +![](images/0153a9628e2e3eccb3237466825dbb02299faf3591c64102e7c65a7d3636acfa.jpg) + +# Constraints + +1. precision: In the answer, plot the output in the same unit. +2. title Requirement: Provide a concise title that summarizes the main idea. +3. perspective Requirement: Give your answer from the perspective of a Mexican agricultural expert. +4.sentence_number_limit: Each paragraph should contain between 3 and 5 sentences. +5. unstrict_formatting Requirements: Number the reasons for your analysis. 
+ +![](images/6c1982e3cc10acf54c4994c66a37f7e8b2fa35ce0ebb3bd38f43899e361eddf4.jpg) + +![](images/63652bdb4d2e32f585e27f8e52a240c787ad167c8c9de70c5880bade271d2159.jpg) + +# Instruction + +In triangle ABC, D is the midpoint of BC, E is the midpoint of AD, and F is the midpoint of CE. Given that the area of triangle ABC is 28 square centimeters, consider the impact of these midpoints on the subdivisions of the triangle. Analyze how these midpoints affect the areas of triangles within triangle ABC and provide a detailed explanation to find the area of the shaded region that is formed within triangle BEC and triangle AEC. Finally, deduce and conclude which part of the interior triangles contribute to the shaded area. + +![](images/d43f9962e8b33f145891928a51d28b11929c9878a53feea2d7123d86ff22bd5e.jpg) + +# Constraints + +1.target Audience requirement: Write your answer for a liberal arts student. You're tutoring her in math. +2(word_count_range_limit: Please write between 150 and 200 words in total. +3.paragraph_number_limit: Your response must consist of exactly 4 paragraphs. +4.sentence_number_limit: Each paragraph should contain between 3 and 5 sentences. +5.not Mention: Please do not mention the words 'formula' or 'equation' in your answer. +6.mention: Mention the word 'midpoint' at least three times throughout your description. +7.tone Requirement: Write your answer in a positive and encouraging tone, emphasizing the simplicity of the geometric concepts involved. + +![](images/589cf867a22182e068246d12e82f9c8b33fe2623ec671b4a6092aff851dc7eff.jpg) +Figure 10. A perception-level problem example from the MM-IFEval benchmark in the web category. + +熱門 + +![](images/4f69995458ad65d6273503c3f1405a1ecd4111c77cb6d19c22e8f067983a182f.jpg) + +BITCOIN BTC + +![](images/7acea6432fb9022b5a33535d982ef3fcc5554b2dcf8c5fcd664b83946350ee55.jpg) + +ETHEREUM ETH + +![](images/2ee1295d0e9b5bc3e465acbf481149629ae85cfd7d1c7d0a05b741fd31c7e9b1.jpg) + +TETHER U... 
USDT + +![](images/b3c9eac6671fe2b8119bc68e7872593103ecc8f2f6902e4917136ee6093c119a.jpg) + +USDC USDC + +![](images/6297fcacd3408db5e5079c1fa07b36dd45523b97c45725cff4d124471ce24fc5.jpg) + +BNB BNB + +![](images/f621fd1a958e226377c6ec8dd2d58c6be49f6c28ae8e82474a52930ee148192d.jpg) + +BUSD BUSD + +3,156,526.95 $0.76\%$ + +86,060.91-2.64% + +32.83-0.03% + +32.83 -0.01% + +19,024.08+0.47% + +32.890.08% + +![](images/ec10135043baf7b83bf204b85b6f92a302788debf4131724f2feac72291fb4ab.jpg) + +![](images/7dc04617a68fbd011d6e2e59205e61d13f5fe74159dcab09d05f33af5f01fd78.jpg) + +![](images/37403248a25ce74f0fee9561af337d76b40215268fb8a2db12df7ab41da09904.jpg) +Figure 9. A compose-level problem example from the MM-IFEval benchmark in the geometry image category. + +# Instruction + +If someone just bought the orange currency for $12,000 and the blue currency for$ 15,000, what is the total amount of money they have now, based on the current currency situation? Round off the decimal part of the answer. + +![](images/0a174d1a8487ae270fa9531eb282b12b3bdf1ebe750de4487d240fbd9bec8c31.jpg) + +# Ground Truth + +26907 + +![](images/93fae1c89257852bfbbe9a0a5b659cc8ae540c6562a41118c4f8fcb933ee185c.jpg) +Figure 11. A perception-level problem example from the MM-IFEval benchmark in the diagram category. + +![](images/ede6d71a7e6a40c3e16bae59a649dada191f0d2c2d2d6a06b20011e6e74d99a2.jpg) + +# Instruction + +In this flowchart, which node is reached after the first condition encountered from Start is judged to be Yes? Preserve the case of node names. + +![](images/f11a13a7625ccb9fecf6ab27dca9dc1ff2f4e8e98d758891aaf3acc5e1041833.jpg) + +# Ground Truth + +End + +![](images/7b82cca0e2400d6ba62d0d7b0cc2be5197f5fe5e94ded82d13f2a8c8d879f3ba.jpg) +Figure 12. A perception-level problem example from the MM-IFEval benchmark in the poster category. 
+ +![](images/3996516606fd064cfdbe3369919ead6f54bcf5d8193d9d747fd5af6365ced908.jpg) + +# Instruction + +Observe the alphabet represented by white dots and line segments in the figure. Starting from 'A', what is the second letter composed of eight white dots? Output this letter in uppercase. + +![](images/65870911467b0cb4597e559aeff59872e5723c320fec13889cba633658b9431a.jpg) + +# Ground Truth + +G + +![](images/23657ebb87830fb6b12c608aa56b6a82daeeb3ab35f058f6a82db3d59dd9cf69.jpg) + +![](images/5d9b2a3cb57fe2986ad599fb1fc44d097fab64fa3f38ba4e5cececff287ec4ec.jpg) +Figure 14. Prompt template for image generation instructions using a large language model in MM-IFEngine. + +# Instruction + +Sam and Tom used the red box and Tom used the blue box. They each gave three answers. Would you please judge which of the two boys found more differences? Print the name of the winning boy directly. + +![](images/7fc02214dfd29a49639cbccdd247b22267c5b82662bcdcc0f061ab82dc9c141f.jpg) +Figure 13. A perception-level problem example from the MM-IFEval benchmark in the finding difference category. + +![](images/2bb7b33d20c7c67877fcd6b28c2171338819d36b4cf30b1ff80ca925a85f94b5.jpg) + +# Ground Truth + +Tom + +# Instruction generation prompt + +You are an expert in generating concise instructions for images. + +## Task + +Given the image, generate a list of appropriate instructions for it. Your instructions should not be too long or overly detailed, and they should not include any specific details about the image. + +On one hand, you can choose appropriate instructions cases for the provided image from the Examples and modify them naturally for the image. + +On the other hand, you can generate new instructions, but only if these new instructions are relevant and appropriate for the image. 
+ +Examples + +{original instructions list} + +You output format should be in the following format: + +{output format} + +# Constraint integration prompt + +You are an expert in add appropriate constraints to the instruction for images. + +# Task + +Given the original instruction, your task is to expand the instruction by adding constraints to it. + +You can select **as many as possible** appropriate types of constraints from the given **Constraints List** below and modify them. However, ensure that the constraints you generate meet the following requirements: + +1. Maintain the thematic consistency of the original instruction. +2.Be relevant and appropriate for the original instruction and be concise. +3. Do not conflict with the original instruction or with each other. + +For example, if the original instruction is a simple query like "What color is the truck?" you should avoid adding constraints such as "Your response must be in three paragraphs," as such a requirement is unnecessary for a short and simple question. Moreover, if the original instruction is a question like "What is the object in the image?", you should avoid adding constraints such as "Respond in the second-person to directly address the reader," as it conflicts with the original instruction. + +```txt +Original Instruction {originalInstruction} +``` + +```erb +## Constraints List +Your added constraints can be from the following types: +{constraints_list_str} +``` + +```txt +Output Format Your output should follow the format below: {output format} +``` + +Figure 15. prompt template for integrating constraints in MM-IFEngine. + +![](images/ea7d98cf5d46a87b51dcd4991ba87ca475604f3db9d6d6ae7d4dcf55faa82418.jpg) +Instruction + +![](images/fa246e78d5d86cd9b166edad5795dce6faf8bcadbcdfe8b5d4f19f4f8d147359.jpg) + +Write a short poem that captures the hustle and bustle of city life depicted in this image. + +![](images/7bf578d20e9149264d8eeba49452b97b318a01bb095a6f55866f3d420ba12a00.jpg) +Figure 16. 
A sample constructed by MM-IFEngine pipeline from cc3m dataset + +# Constraints + +1. Write each stanza so it includes a parenthetical phrase. +2.Each stanza should have 4 lines. +3.Use bold for every noun. +4.Start the poem with 'In the heart of the city...' and end with '...the city never sleeps.' +5.Use present tense throughout the poem. +6.Use the third-person perspective to maintain an objective viewpoint. + +![](images/b27cab99c2ed7531296bfc443c19e4215ad0b0f4f75480717906d223b26bd8b5.jpg) +Answer + +In the heart of the city, where the \*\*streets\*\* hum with life. + +People rush (beneath the bright blue sky), + +Buses pass (\*\*tires\*\* whispering on the road), + +Every moment feels like a lively show. + +The \*\*signs\*\* stand tall amidst the crowd, + +An ambulance speeds by (with urgent sound), + +Newspapers rest (in their quiet stack), + +As footsteps weave through the urban track. + +Buildings rise, their facades grand, + +A man walks past (across the land), + +Voices blend into ambient noise, + +Life unfolds without any poise. + +Shoppers carry bags (full of delight), + +Sunlight dances (on the pavement's sight), + +Each face tells tales of daily grind, + +Yet spirits soar, unconfined....the city never sleeps. + +![](images/bb4d03c21b188b7e6e948b05b0228250d985286a8306dc23cf84eb15360ad44c.jpg) +Instruction + +What treat can be baked in an oven? + +![](images/bc9dded61ccb3724cf7534e635cf47797e703693d01074c8691e6062ff4b1053.jpg) +Figure 17. A sample constructed by MM-IFEngine pipeline from Allava dataset + +# Constraints + +1. Write your answer for a general adult audience with limited baking experience, providing clear and simple explanations. +2. Write your answer in a positive and encouraging tone, making the baking process seem accessible and enjoyable. +3. Mention both 'oven' and 'baking' at least once in your response, ensuring they are used naturally and contextually. +4. 
Begin every sentence with an exclamation mark to convey excitement about baking. +5. Your response must consist of exactly 3 paragraphs, each focusing on a different type of treat that can be baked in an oven. + +![](images/bdd9fcab21c588a102cee9f5b639a91bcdb87ca0f7f02f68691766d792155ca6.jpg) + +# Answer + +!One delightful treat you can bake in an oven is cookies! Whether you prefer classic chocolate chip or something more adventurous like oatmeal raisin, baking cookies is a fun and rewarding activity. The aroma of freshly baked cookies wafting through your home will make everyone excited! + +! Another scrumptious option for oven baking is muffins! Muffins are perfect for breakfast or as a snack, and you can customize them with various ingredients such as blueberries, bananas, or even chocolate chips. Baking muffins allows you to experiment with flavors while enjoying the satisfaction of creating something delicious from scratch. + +! Lastly, consider baking a cake! Cakes come in countless varieties, from simple vanilla to rich chocolate, and can be decorated to suit any occasion. Baking a cake not only provides a sweet treat but also enhances your skills in mixing, measuring, and decorating. So grab your apron and get ready to enjoy the magic of baking in your own kitchen! + +![](images/ef4afe7e62cd13d79634e9a4ebc580c21f65027704dad58fa5b37a5c231d52d6.jpg) +Figure 18. A sample constructed by MM-IFEngine pipeline from geo170k dataset + +![](images/ca77698c74d5a156981b000cf49d67605324d4995510c0e9d28ed9e134c00878.jpg) + +# Instruction + +In the diagram, quadrilateral ABCD is inscribed in circle O with diameter BE and line AE connected to it. Given that the measure of angle BCD is twice the measure of angle BAD, and that angle DAE is equal to angle CAD, develop a comprehensive geometric proof to determine the degree measure of angle DAE. + +![](images/f6b38cfc3886ea51093755f3f7be41388885e1728923505cc87d1cdc62a2637a.jpg) + +# Constraints + +1. 
Your response must consist of exactly 3 paragraphs, each focusing on a different aspect of the proof: the initial setup, the application of theorems, and the final calculation. + +2. Each paragraph should contain between 4 and 6 sentences, ensuring a balanced and detailed explanation. +3.The response must be between 120 and 150 words in total, maintaining clarity and conciseness. + +4. Write your answer in a clear and methodical tone, suitable for a high school geometry class. + +5.Provide a concise title that summarizes the main idea of the proof, such as 'Geometric Proof for Angle DAE'. + +![](images/5229e3f9be101f02820ef6c5312fbeb5ec600d9304a845c7c81adfecb6202d33.jpg) + +# Answer + +**Geometric Proof for Angle DAE** To begin our proof, we note that since quadrilateral ABCD is inscribed in circle O with BE as its diameter, angle ABE is a right angle (90 degrees) by the Inscribed Angle Theorem. This theorem states that an angle inscribed in a semicircle is always a right angle. Additionally, given that angle BCD is twice angle BAD, let's denote angle BAD as $\backslash(x\backslash)$ . Therefore, angle BCD measures $\backslash(2x\backslash)$ . $\backslash$ n\NNext, applying the properties of cyclic quadrilaterals, we know that opposite angles sum up to 180 degrees. Thus, angle BAD plus angle BCD equals 180 degrees. Substituting the values, we get $\backslash(x + 2x = 180^{\wedge}\backslash\text{circ} \backslash)$ , leading to $\backslash(3x = 180^{\wedge}\backslash\text{circ} \backslash)$ or $\backslash(x = 60^{\wedge}\backslash\text{circ} \backslash)$ . Since angle DAE is equal to angle CAD, and considering triangle ADE where AD is common, angle DAE also measures 30 degrees because angle CAD is half of angle BAD due to symmetry and the given condition. Finally, calculating the exact measure of angle DAE involves recognizing that angle CAD is half of angle BAD, which was determined to be 60 degrees. Hence, angle CAD is 30 degrees. 
Consequently, angle DAE, being congruent to angle CAD, also measures 30 degrees. This completes our proof, demonstrating that angle DAE indeed measures 30 degrees based on the provided conditions and geometric principles applied throughout the process. + +![](images/d5913805078e9758b3b084021463a8066cf52a8f5c88938099c0eb7aedea580d.jpg) +Figure 19. A DPO training set sample, where the rejected data is obtained by removing $33\%$ of the constraints + +![](images/7345a749a10e1e6c186fa9f6730013ec59203937baa69a45f79edc7ecc517d25.jpg) +Figure 20. Prompt template for automated verification function selection and paramater extraction + +# prompt + +What might the seabird be doing flying alone over the ocean? Respond in the third-person perspective to maintain an objective viewpoint. Write the answer in the form of a brief nature observation log entry. Each paragraph should contain between 3 and 5 sentences. Write your answer for a general adult audience with an interest in wildlife and nature. Conclude with a single sentence summarizing the likely behavior of the seabird. + +![](images/a7b47c86c0f216460b617263eee211805158176d0cb11d14401cc8c47056be8d.jpg) + +Chosen + +Score:5/5 + +In today's observation, a solitary seabird was noted gliding gracefully above the vast expanse of the ocean. The bird's wings were fully extended, indicating efficient use of air currents to conserve energy while searching its surroundings. Seabirds often fly alone when scouting for food sources such as schools of fish or marine organisms near the surface. This lone flight could also serve purposes like territorial patrol or simply enjoying favorable wind conditions. In conclusion, the seabird is most likely engaged in foraging activities. 
+ +![](images/5ee38ac2a4bb6d15d7ef96ab376ffb7813519d07e792a274fa6475fea2d02656.jpg) + +Rejected + +Score:3/5 + +**Nature Observation Log Entry** +**Date:** [Insert Date] +**Location:** [Insert Location] + +**Observation:** A solitary seabird, possibly a seagull or pelican, is seen soaring gracefully above the vast expanse of the ocean. The bird's wings are spread wide, indicating it is likely gliding or flying at a steady pace. + +The water below appears calm, with gentle ripples on its surface, suggesting a serene environment. The bird's position and posture suggest it may be searching for food, such as fish or crustaceans, that are abundant in the ocean's depths. The absence of other birds or marine life in the immediate vicinity could imply that this bird is either hunting alone or simply enjoying a solitary flight over the sea. + +\*\*Analysis:\*\* Seabirds like seagulls and pelicans are known for their ability to glide and soar on the wind currents, conserving energy while searching for food. + +The bird's choice of location, over the ocean, indicates its reliance on marine resources for sustenance. The calmness of the water suggests favorable weather conditions, which are essential for successful hunting and gliding. This observation provides insight into the key role that birds play in the formation of such environments. + +highlighting their adaptability to oceanic environments and their role in the marine ecosystem. + +# Prompt to choose verified function and extract params + +Please analyze the following constraint and select the most appropriate function from the given list to verify this constraint. Then extract the required parameters for the verification function from the constraint. + +Constraint content: {constraint value} + +Available verification functions: {all candidate validation function names and parameters} + +Please complete the analysis following these steps: + +**Your task:** + +1. 
Select the most appropriate verification function from the above list (return empty if none is suitable) +2. Extract the required parameters from the constraint based on the function description + +**Please return the result in JSON format as follows:** {output format} + +# Compare Judge Prompt + +You are an expert in judging whether the response follows the given constraint. Your task is to assess whether the model's response satisfies the given constraint and return True or False. I will provide you with the constraint and the model's response under this constraint. To assist with your evaluation, I will also provide you with the model's response to the same question without the constraint. + +Constraint: {constraint} + +Response under the constraint: {pred_with_constraint} + +Response without the constraint: {pred_without_constraint} + +**Please follow the steps below to evaluate**: + +Step 1. Compare the model's response under the constraint with its response without the constraint. If you believe these two answers are very similar, it means the model has not fully considered the impact of the constraint on the answer. Please return False. +Step 2. Compare the model's response under the constraint with the content of the constraint. If you believe the model's response does not meet the requirements specified in the constraint, return False. Otherwise, if the response effectively satisfies the constraint, return True. + +**Response Format**: Your answer should only include "True" or "False", and no additional text. + +Figure 21. Prompt template for Compare Judge Method + +# Direct Judge Prompt + +Your task is to evaluate whether the response from an AI assistant adheres to all of the given constraints. + +Please follow the requirements below to make the judgment: + +1. Be strict and consistent in your assessment. +2. You should refer to the content of image to make the judgment. +3. 
For one constraint, if the response fails to fully meet the constraint, give it a score of 0. + +Otherwise, give it a score of 1. + +{prediction} + + + + + +{constraints_str} + + + +You should judge and explain for each constraint in the constraint list without omitting any constraint. Finally, list scores of all the constraints in one sentence. + +You should strictly follow the format below: + +Judgement: ... + +Summary: Score of constraint_1: x/1, Score of constraint_2: x/1, Score of constraint_3: x/1, ..., Score of constraint_n: x/1. + +Figure 22. Prompt template for Direct Judge Method + +
Main ClassSubclassEvaluationDescriptionExample
A. Rhetoric & LogicA.1 Rhetoric requirementsCompare JudgeConstraint that requires the response to use a specific rhetorical technique.“Your output should include a metaphor.”
A.2 Logical relationDirect JudgeConstraint that ensures logical cohesion within the response by requiring specific logical connectors or structures.“Each paragraph must contain at least one cause-and-effect relationship.”
B. Format limitB.1 Natural languageDirect JudgeConstraint specifying which natural language(s) should be used in the response.“Please answer in Spanish.”
B.2 Part of speechDirect JudgeConstraint that requires the response to use a specific part of speech.“Use at least three adjectives in your response.”
B.3 Sentence structureDirect JudgeConstraint that specifies special sentence structures to be used in the response.“Write each sentence so it includes a parenthetical phrase.”
B.4 Tense requirementsDirect JudgeConstraint that specifies the use of multiple tenses within the response.“In past tense totally.”
B.5 PunctuationRule-baseConstraint specifying unconventional yet feasible punctuation usage in the response.“Replace all periods with semicolons.”
B.6 HighlightDirect JudgeConstraint that specifies a unique but manageable method for highlighting text.“Use **bold** for every noun.”
B.7 Title requirementsDirect JudgeConstraint that specifies how titles should be added to the response.“Provide a concise title that summarizes the main idea.”
B.8 Style requirementsCompare JudgeConstraint that specifies an unconventional or distinctive writing style for the response.“Write the answer in the form of a brief detective story.”
B.9 Case requirementsDirect JudgeConstraint specifying an unusual yet readable approach to letter case in the response.“Write all nouns in UPPERCASE and all adjectives in lowercase.”
B.10 Unstrict formatDirect JudgeConstraint specifying a unique format for the output while keeping it approachable.“Format your response as a short play script with speaker labels.”
B.11 Strict formatDirect JudgeConstraint that requires the response to follow a strictly defined format.“Please provide the output as well-formed XML with custom tags.”
B.12 Number and ListDirect JudgeConstraint for using numbered or bulleted lists in the response.“Present all key points as a numbered list with bulleted sub-lists.”
B.13 Wrap upDirect JudgeConstraint that requires a concise, well-structured summary or conclusion.“Provide a final paragraph summarizing the key arguments.”
B.14 First letterDirect JudgeConstraint specifying a pattern for the first letters of sentences or paragraphs.“Each sentence should begin with a letter that progresses through the alphabet.”
C. Text Length limitC.1 Paragraph limitRule-baseConstraint that specifies the number of paragraphs in the response.“Your response must consist of exactly 4 paragraphs.”
C.2 Sentence limitRule-baseConstraint that specifies the number of sentences in each paragraph.“Totally use 5 sentences in your response.”
C.3 Word limitRule-baseConstraint that specifies a small range for the total number of words in the text.“Your response must be a single word or phrase.”
D. Math limitD.1 PrecisionRule-baseConstraint that specifies the level of precision required in mathematical calculations.“Keep two decimal places for all numbers in the answer.”
D.2 Scientific notationRule-baseConstraint that requires the use of scientific notation for large or small numbers.“Express all numbers greater than 1,000 in scientific notation.”
E. Action limitE.1 Role imitationCompare JudgeConstraint requiring the response to imitate the tone and style of a specific role or public figure.“Please answer in the style of a sports commentator.”
E.2 Prefix and SuffixRule-baseConstraint that requires the response to begin or end with a specific phrase or symbol.“Please start your answer with ‘Once upon a time...’”
E.3 Tone requirementCompare JudgeConstraint specifying an emotional tone for the response.“Write your answer in a positive and encouraging tone.”
E.4 PerspectiveDirect JudgeConstraint that specifies a narrative perspective for the response.“Write your answer in the first-person singular as a personal account.”
E.5 Target audienceCompare JudgeConstraint requiring the response to be tailored for a specific audience.“Craft your response as if explaining to high school students.”
E.6 SituationCompare JudgeConstraint requiring the response to be set in a specific situation or scenario.“Answer as if you are giving safety instructions before a flight.”
E.7 Prior conditionDirect JudgeConstraint stating that when a specific condition is met, the response must follow a particular process.“If the user requests legal advice, begin with a disclaimer.”
F. KeywordF.1 MentionRule-base & Direct JudgeConstraint that requires including a specific keyword a certain number of times.“Mention ‘GreenTech’ exactly three times throughout.”
F.2 Not mentionRule-base & Direct JudgeConstraint that requires avoiding specific keywords or phrases.“Do not mention the words ‘budget’ or ‘investment’.”
F.3 Multiple mentionRule-base & Direct JudgeConstraint requiring including multiple specified keywords in a balanced manner.“Mention both ‘sustainability’ and ‘renewable energy’ at least twice.”
F.4 Keyword variationDirect JudgeConstraint requiring the use of synonyms or variations of a given keyword.“Use at least three synonyms for ‘innovation’ throughout your text.”
+ +Table 5. Constraint Categories and Evaluation Methods for MM-IFEval + +
CategoryInstruction
Descriptive AnalysisDescribe the animal's typical habitat, diet, and one unique behavioral trait.
Provide a detailed analysis of the image, including the setting, characters, and notable objects.
Explain the activity taking place in the image.
Describe the activities of the person on the left in the image.
Emotional & PerspectiveWhat emotions do you think the person in this image might be feeling?
Imagine you are the person on the left in the scene depicted in this image, write a story about what you would do next.
Personify the sign in the image and express its feelings about the rule it presents.
Creative WritingCreate a short conversation between any two individuals in the scene.
Pretend this snapshot belongs to a larger story. Write a quick paragraph setting up the next plot twist.
Use this picture as your muse. Craft a brief poem—any style—that captures the emotion you sense.
Turn this scene into a short children's story focusing on wonder and curiosity.
Write a short poem with two stanzas, inspired by the emotion or content depicted in this image.
Social Media & ContentAssume this is an image you are about to post on Twitter. Please provide a short, upbeat caption describing it.
Assume you are creating a Pinterest pin with this image. Write a short inspirational or motivational caption to accompany it.
If this image were promoting an upcoming event, compose a quick announcement with the date, a highlight of what to expect, and a call-to-action.
Role PlayImagine you are the photographer who took this picture. Briefly explain why you chose to capture this particular moment and what story you hope it conveys.
+ +Table 6. Task Pool for MM-IFEngine + +
Verified Function NameFunction ParametersConstraint ExampleParameter Example
check Whether\_response\_paragraph\_number_in_rangelower_bound:int,upper_bound:intThe number of text paragraphs be at least 3[3, 10000]
check Whether\_response\_sentence\_number_in_rangelower_bound:int,upper_bound:intThe number of sentences be exactly 3[3, 3]
check Whether\_each\_paragraph\_sentence\_number_in_rangelower_bound:int,upper_bound:intThe number of sentences in each paragraph be less than 3[0, 2]
check Whether\_each\_paragraph\_sentence\_number_in_range_listranges:List[tuple]The number of sentences in the first paragraph be exactly 3, and in the second paragraph be at most 2[(3, 3), (1, 2)]
check Whether\_each\_paragraph\_sentence\_number_exceedsexceed_num:int,upper_bound:intEach new paragraph should have 1 sentence more than the previous one, no paragraph exceeds 7 sentences[1, 7]
check Whether\_response_word_count_in_rangelower_bound:int,upper_bound:intThe number of words should be between 50 and 80[50, 80]
check Whether\_each\_paragraph\_word_count_in_rangelower_bound:int,upper_bound:intThe number of words in each paragraph should be between 50 and 80[50, 80]
check Whether\_each\_paragraph\_word_count_in_range_listranges:List[tuple]The number of words in the first paragraph be between 20 and 30, in the second between 50 and 80[(20, 30), (50, 80)]
check Whether\_whole\_response_notContain_certain_substringsubstring:strThe response should not contain the word "apple"["apple"]
check Whether\_whole\_response_notContain_certain_substringssubstrings:List[str]The response should not contain the words "apple" and "banana"[["apple", "banana"]]
check Whether\_each\_sentence_begin_with_certain_substringsubstring:strEach sentence should start with exclamation point["!"]
check Whether\_each\_sentence_end_with_certain_substringsubstring:strEach sentence should end with "apple"["apple"]
check Whether\_whole\_response_begin_with_certain_substringsubstring:strThe response should start with "apple"["apple"]
check Whether\_whole\_response_end_with_certain_substringsubstring:strThe response should end with "apple"["apple"]
check Whether\_keywords_metioned_in_rangekeywords:List[str], lower_bound(times):int, upper_bound(times):intThe response should mention the word "apple" at least 3 times[["apple"], 3, 10000]
check_number_precision_in_responseprecision:intThe numbers in the response should have 2 decimal places[2]
check Whether\_has\_no\_number_in_response-The response should not contain any number[]
check Scientific_notation\_precision_in_responsesignificantDigits:intThe numbers in the response should have 3 significant digits[3]
+ +Table 7. Verification Functions for rule-based evaluation method in MM-IFEval \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07957/images/0153a9628e2e3eccb3237466825dbb02299faf3591c64102e7c65a7d3636acfa.jpg b/data/2025/2504_07xxx/2504.07957/images/0153a9628e2e3eccb3237466825dbb02299faf3591c64102e7c65a7d3636acfa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9df62de778dcfdd8c81397b349d7fac7ef448d95 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/0153a9628e2e3eccb3237466825dbb02299faf3591c64102e7c65a7d3636acfa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9277ee9e7d956b7325a7c78c3b1df690b75be9cbd3f9d83a53a58509916d58f8 +size 2375 diff --git a/data/2025/2504_07xxx/2504.07957/images/01829f3201f86c5241247f1c7856596385d02be6106a723acf901c1d6a6e9bde.jpg b/data/2025/2504_07xxx/2504.07957/images/01829f3201f86c5241247f1c7856596385d02be6106a723acf901c1d6a6e9bde.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cdac3b8f82836025d7634070f69212427365ea08 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/01829f3201f86c5241247f1c7856596385d02be6106a723acf901c1d6a6e9bde.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a4f598b7f9fb9287eed8fcec19b4dff9d4c1460f619e6378caeb4e2081e6d64 +size 1543 diff --git a/data/2025/2504_07xxx/2504.07957/images/0484a0868ae93c6822a2e37ad27487a45d7da1c18d389d48bfe3030a757e0da2.jpg b/data/2025/2504_07xxx/2504.07957/images/0484a0868ae93c6822a2e37ad27487a45d7da1c18d389d48bfe3030a757e0da2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..177e7286aa5a58dd43c77c7cc117c208beb02054 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/0484a0868ae93c6822a2e37ad27487a45d7da1c18d389d48bfe3030a757e0da2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcab47136d611dfa610e06b26aeff2d84b57383abd01427d7894e48bf2c915c0 +size 2968 diff --git 
a/data/2025/2504_07xxx/2504.07957/images/0a174d1a8487ae270fa9531eb282b12b3bdf1ebe750de4487d240fbd9bec8c31.jpg b/data/2025/2504_07xxx/2504.07957/images/0a174d1a8487ae270fa9531eb282b12b3bdf1ebe750de4487d240fbd9bec8c31.jpg new file mode 100644 index 0000000000000000000000000000000000000000..179de468bd37994350a1acc52aab41070785b2cd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/0a174d1a8487ae270fa9531eb282b12b3bdf1ebe750de4487d240fbd9bec8c31.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c11da5d905a9a1523d10173276751fafae598a9d9e1e9bd862bfde413c4d3d6b +size 2368 diff --git a/data/2025/2504_07xxx/2504.07957/images/1146acfca5d9086061b4a464479e45f75eb2dbde1aae95a753a84a6280d54ded.jpg b/data/2025/2504_07xxx/2504.07957/images/1146acfca5d9086061b4a464479e45f75eb2dbde1aae95a753a84a6280d54ded.jpg new file mode 100644 index 0000000000000000000000000000000000000000..657509743fc113d80a0bf9208a4d08074e7bccf5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/1146acfca5d9086061b4a464479e45f75eb2dbde1aae95a753a84a6280d54ded.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:831dff880a530c933d87f1c951f25670f983dbca38afa07361926ac2c285f2ac +size 3434 diff --git a/data/2025/2504_07xxx/2504.07957/images/22a8743138bb9705cbfaa1f460aa340a3dd2f922340969a4a75e4547935fad07.jpg b/data/2025/2504_07xxx/2504.07957/images/22a8743138bb9705cbfaa1f460aa340a3dd2f922340969a4a75e4547935fad07.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c3af99f1fd40d03155f8aa72450b6e8e6b05426 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/22a8743138bb9705cbfaa1f460aa340a3dd2f922340969a4a75e4547935fad07.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2ab906093fb25064e7faea7a32560806bfb698c3b09773380e4a673d3b68a54 +size 91325 diff --git a/data/2025/2504_07xxx/2504.07957/images/23657ebb87830fb6b12c608aa56b6a82daeeb3ab35f058f6a82db3d59dd9cf69.jpg 
b/data/2025/2504_07xxx/2504.07957/images/23657ebb87830fb6b12c608aa56b6a82daeeb3ab35f058f6a82db3d59dd9cf69.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34fcc2a2dfc6841191cdae34c70f9d791343e493 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/23657ebb87830fb6b12c608aa56b6a82daeeb3ab35f058f6a82db3d59dd9cf69.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6978df6e689398ef2262d15267b6ed4e673d73cfbb5e3863265cc97bc6cad35 +size 22485 diff --git a/data/2025/2504_07xxx/2504.07957/images/253ed87f97d5bd866869740de70181822344ed1736d653dd6b6e2f7568de7895.jpg b/data/2025/2504_07xxx/2504.07957/images/253ed87f97d5bd866869740de70181822344ed1736d653dd6b6e2f7568de7895.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d1687a4cfa8bc33f1856c3348225ff8a0695e6a3 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/253ed87f97d5bd866869740de70181822344ed1736d653dd6b6e2f7568de7895.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a245d61a544437cad8041e4c1afc4f8d6df56e3d86959fb05b678a99623845a9 +size 1270 diff --git a/data/2025/2504_07xxx/2504.07957/images/26f2af004dd7f5937db26a4a9177826d59f32c69e865ec08b9eeb5358d9181b2.jpg b/data/2025/2504_07xxx/2504.07957/images/26f2af004dd7f5937db26a4a9177826d59f32c69e865ec08b9eeb5358d9181b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d12bb7633f9cfd8bf3b7df5e83e990eaf90b3c9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/26f2af004dd7f5937db26a4a9177826d59f32c69e865ec08b9eeb5358d9181b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91c5a24b2c1e6a0aafdd823fef93b816afb92687c13ea3dbc84039866c8e42c8 +size 11297 diff --git a/data/2025/2504_07xxx/2504.07957/images/2bb7b33d20c7c67877fcd6b28c2171338819d36b4cf30b1ff80ca925a85f94b5.jpg b/data/2025/2504_07xxx/2504.07957/images/2bb7b33d20c7c67877fcd6b28c2171338819d36b4cf30b1ff80ca925a85f94b5.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d233e6a495053f53e05360bc03a64c88fb7474f2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/2bb7b33d20c7c67877fcd6b28c2171338819d36b4cf30b1ff80ca925a85f94b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7030eeaad261c1b73a55c0b240cb90c2c4fe134c15af82247be4d0d26da6f295 +size 2187 diff --git a/data/2025/2504_07xxx/2504.07957/images/2ee1295d0e9b5bc3e465acbf481149629ae85cfd7d1c7d0a05b741fd31c7e9b1.jpg b/data/2025/2504_07xxx/2504.07957/images/2ee1295d0e9b5bc3e465acbf481149629ae85cfd7d1c7d0a05b741fd31c7e9b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80a1a81532a94c0a9ce379135dca46b837669b67 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/2ee1295d0e9b5bc3e465acbf481149629ae85cfd7d1c7d0a05b741fd31c7e9b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9dc423584fc93217cd5cfed6c6818d1495c5340c4a5dd8eabdf653bf8cd5e95 +size 1117 diff --git a/data/2025/2504_07xxx/2504.07957/images/37403248a25ce74f0fee9561af337d76b40215268fb8a2db12df7ab41da09904.jpg b/data/2025/2504_07xxx/2504.07957/images/37403248a25ce74f0fee9561af337d76b40215268fb8a2db12df7ab41da09904.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ac76b56e4f4c0689e8516193465b6e9be7a2444 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/37403248a25ce74f0fee9561af337d76b40215268fb8a2db12df7ab41da09904.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9adc55f222df8e34c90a6fba5d97a03be6ac4752421dc848a0bd631394c0855e +size 2538 diff --git a/data/2025/2504_07xxx/2504.07957/images/3996516606fd064cfdbe3369919ead6f54bcf5d8193d9d747fd5af6365ced908.jpg b/data/2025/2504_07xxx/2504.07957/images/3996516606fd064cfdbe3369919ead6f54bcf5d8193d9d747fd5af6365ced908.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8c25b559e660633e740a407a3a30878b643e4b6 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07957/images/3996516606fd064cfdbe3369919ead6f54bcf5d8193d9d747fd5af6365ced908.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a4ae7e948a636d1b1de83226fc90984ba7a52a73929998eb9a6b56b0592089e +size 2437 diff --git a/data/2025/2504_07xxx/2504.07957/images/3d7a238954389d9ee0e5757dcc7bb6a09455c5244976e249843db6c5a9f8ab02.jpg b/data/2025/2504_07xxx/2504.07957/images/3d7a238954389d9ee0e5757dcc7bb6a09455c5244976e249843db6c5a9f8ab02.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d389e06e36e7c505441590c67d59ff9ecc6c03c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/3d7a238954389d9ee0e5757dcc7bb6a09455c5244976e249843db6c5a9f8ab02.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07f8995b917af15c81a4a678e7850ad4bb685fa06511645c73692c160cd29c32 +size 48496 diff --git a/data/2025/2504_07xxx/2504.07957/images/40a6ec4b503c24eee160a53d54fb61c79b65802f8cbc3c328e2ec808cb3733e6.jpg b/data/2025/2504_07xxx/2504.07957/images/40a6ec4b503c24eee160a53d54fb61c79b65802f8cbc3c328e2ec808cb3733e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..560bb756322a15e53dcdcca634d346a086a26b2e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/40a6ec4b503c24eee160a53d54fb61c79b65802f8cbc3c328e2ec808cb3733e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4076b43b9429b227b380a0f9c0191529f3d4195eed405bb968cb26fdc6557172 +size 4896 diff --git a/data/2025/2504_07xxx/2504.07957/images/4f69995458ad65d6273503c3f1405a1ecd4111c77cb6d19c22e8f067983a182f.jpg b/data/2025/2504_07xxx/2504.07957/images/4f69995458ad65d6273503c3f1405a1ecd4111c77cb6d19c22e8f067983a182f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc24a38eb1ea36d2952b4ceb1b8644b00d192660 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/4f69995458ad65d6273503c3f1405a1ecd4111c77cb6d19c22e8f067983a182f.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d614657dc1f877a67268871d335ee2fa2f62560b75366b055187547b40a5368e +size 1110 diff --git a/data/2025/2504_07xxx/2504.07957/images/514e423050fb0272612f0f222c607457aa6658a3c1d4f4cee5e2a32743f32099.jpg b/data/2025/2504_07xxx/2504.07957/images/514e423050fb0272612f0f222c607457aa6658a3c1d4f4cee5e2a32743f32099.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62fde7d81d51bef1e7e3323d52051e4176a1437b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/514e423050fb0272612f0f222c607457aa6658a3c1d4f4cee5e2a32743f32099.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01628befd493194ccf4ecb7744457beb568bfe10986f258d56d4f2936e2b7c2b +size 37539 diff --git a/data/2025/2504_07xxx/2504.07957/images/5229e3f9be101f02820ef6c5312fbeb5ec600d9304a845c7c81adfecb6202d33.jpg b/data/2025/2504_07xxx/2504.07957/images/5229e3f9be101f02820ef6c5312fbeb5ec600d9304a845c7c81adfecb6202d33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ff86c1777a34d16ee0a88fe8b3e856ccc8c619b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/5229e3f9be101f02820ef6c5312fbeb5ec600d9304a845c7c81adfecb6202d33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d013be6140be607cef2a282bfbd062a4271c79320cc8a4e69f744e4e05f90ec4 +size 1502 diff --git a/data/2025/2504_07xxx/2504.07957/images/5620c0d5370927e54a310681de1128ea2aee9ec77460e5d08a808103fb653489.jpg b/data/2025/2504_07xxx/2504.07957/images/5620c0d5370927e54a310681de1128ea2aee9ec77460e5d08a808103fb653489.jpg new file mode 100644 index 0000000000000000000000000000000000000000..033eaf03b4afe9a2a2684a9c4a2ad64daa39790f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/5620c0d5370927e54a310681de1128ea2aee9ec77460e5d08a808103fb653489.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d49469793ece90fec4631c8f5bf16a0c6c29902f5afb979a366e8052ce2429e3 +size 14021 diff --git 
a/data/2025/2504_07xxx/2504.07957/images/589cf867a22182e068246d12e82f9c8b33fe2623ec671b4a6092aff851dc7eff.jpg b/data/2025/2504_07xxx/2504.07957/images/589cf867a22182e068246d12e82f9c8b33fe2623ec671b4a6092aff851dc7eff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b75cf4a62161d72f577379f2f1d67c5803ff3f19 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/589cf867a22182e068246d12e82f9c8b33fe2623ec671b4a6092aff851dc7eff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b032cb3272761f7ec3b0bdb69d18e61762bafc688aca9514607d35d1ca3cc0c +size 860 diff --git a/data/2025/2504_07xxx/2504.07957/images/5d2246d97d8d1fe22d28c2487f238cb66c3ff7dafcb28d5e72d52ca7359ca660.jpg b/data/2025/2504_07xxx/2504.07957/images/5d2246d97d8d1fe22d28c2487f238cb66c3ff7dafcb28d5e72d52ca7359ca660.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b8cc65dd603a650fcb70ca7bf0c4e77d0546d9ef --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/5d2246d97d8d1fe22d28c2487f238cb66c3ff7dafcb28d5e72d52ca7359ca660.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2aa0c0da6fc098cc1aa97c25c40485e5f2e4f4ef2755212be9892ef10a6bbe2 +size 3589 diff --git a/data/2025/2504_07xxx/2504.07957/images/5d9b2a3cb57fe2986ad599fb1fc44d097fab64fa3f38ba4e5cececff287ec4ec.jpg b/data/2025/2504_07xxx/2504.07957/images/5d9b2a3cb57fe2986ad599fb1fc44d097fab64fa3f38ba4e5cececff287ec4ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ae2b45910934e96ae434a1ba8909294e3dcd814 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/5d9b2a3cb57fe2986ad599fb1fc44d097fab64fa3f38ba4e5cececff287ec4ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8467b945dde5780bc5313d2916e007f547ccb0a5fdcdd091fa601a626f86b1e5 +size 2323 diff --git a/data/2025/2504_07xxx/2504.07957/images/5ee38ac2a4bb6d15d7ef96ab376ffb7813519d07e792a274fa6475fea2d02656.jpg 
b/data/2025/2504_07xxx/2504.07957/images/5ee38ac2a4bb6d15d7ef96ab376ffb7813519d07e792a274fa6475fea2d02656.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1364beda2d427f64425feca46f26eba98b20548e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/5ee38ac2a4bb6d15d7ef96ab376ffb7813519d07e792a274fa6475fea2d02656.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cfd879a32292427acbbc14f523e66f12ec43d0695265d46f80a5c723d96b57a +size 1425 diff --git a/data/2025/2504_07xxx/2504.07957/images/6297fcacd3408db5e5079c1fa07b36dd45523b97c45725cff4d124471ce24fc5.jpg b/data/2025/2504_07xxx/2504.07957/images/6297fcacd3408db5e5079c1fa07b36dd45523b97c45725cff4d124471ce24fc5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5706af8b61a1bc1f5be3053c8a8fbc07157772c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/6297fcacd3408db5e5079c1fa07b36dd45523b97c45725cff4d124471ce24fc5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fdbface5aafc336116adc88137b53b4ce9eddc4b78d191e552a957a7df1be95 +size 1073 diff --git a/data/2025/2504_07xxx/2504.07957/images/63652bdb4d2e32f585e27f8e52a240c787ad167c8c9de70c5880bade271d2159.jpg b/data/2025/2504_07xxx/2504.07957/images/63652bdb4d2e32f585e27f8e52a240c787ad167c8c9de70c5880bade271d2159.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8355c75f90b98a35cacc10c440252191f6551cf5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/63652bdb4d2e32f585e27f8e52a240c787ad167c8c9de70c5880bade271d2159.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0817b165ace02e171f318d3e390b42ecd2510e577c48add211f865824ed0bdb +size 2738 diff --git a/data/2025/2504_07xxx/2504.07957/images/65870911467b0cb4597e559aeff59872e5723c320fec13889cba633658b9431a.jpg b/data/2025/2504_07xxx/2504.07957/images/65870911467b0cb4597e559aeff59872e5723c320fec13889cba633658b9431a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..66abbbebe2f08a79f6cf987e3eb6434951641241 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/65870911467b0cb4597e559aeff59872e5723c320fec13889cba633658b9431a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a4f5a91708bd9c702dfaed3c4bf66349c8c04d607412a7e4a8972c69ddc566e +size 2230 diff --git a/data/2025/2504_07xxx/2504.07957/images/6b5b9b93d1fcd79a581602fab86639d6577f9b424223acb45e02e3004e7e0e55.jpg b/data/2025/2504_07xxx/2504.07957/images/6b5b9b93d1fcd79a581602fab86639d6577f9b424223acb45e02e3004e7e0e55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c3a1d2fcd9e6c177082b2d1a0694c3f0b848369 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/6b5b9b93d1fcd79a581602fab86639d6577f9b424223acb45e02e3004e7e0e55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d2ed7170d538d46c9bfd02b416377a1b0746b1787a0b5e67cc13f8a50e969bf +size 74145 diff --git a/data/2025/2504_07xxx/2504.07957/images/6c1982e3cc10acf54c4994c66a37f7e8b2fa35ce0ebb3bd38f43899e361eddf4.jpg b/data/2025/2504_07xxx/2504.07957/images/6c1982e3cc10acf54c4994c66a37f7e8b2fa35ce0ebb3bd38f43899e361eddf4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e73c626482212070dfeff48e96474e6a1a79f0f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/6c1982e3cc10acf54c4994c66a37f7e8b2fa35ce0ebb3bd38f43899e361eddf4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c634df8b6cbbeb2a8482cff4ffc2186b327cb64df328b8d1bfc47c3cd1c2a7b +size 8909 diff --git a/data/2025/2504_07xxx/2504.07957/images/6e3680bdcc5cb2321579d781eb8c241e9d5302be72393bbcf07887d1d980df2c.jpg b/data/2025/2504_07xxx/2504.07957/images/6e3680bdcc5cb2321579d781eb8c241e9d5302be72393bbcf07887d1d980df2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fec7899d6004ea73e5e535d5e7ef3edbeb70a26 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07957/images/6e3680bdcc5cb2321579d781eb8c241e9d5302be72393bbcf07887d1d980df2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a7805508d9bce10682f9a65b7b06cd1989a84369fcbecf67f7ac69795997ed5 +size 48364 diff --git a/data/2025/2504_07xxx/2504.07957/images/7345a749a10e1e6c186fa9f6730013ec59203937baa69a45f79edc7ecc517d25.jpg b/data/2025/2504_07xxx/2504.07957/images/7345a749a10e1e6c186fa9f6730013ec59203937baa69a45f79edc7ecc517d25.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b2b0c85bab73d15c86d9396bc2ff69f4de931b72 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/7345a749a10e1e6c186fa9f6730013ec59203937baa69a45f79edc7ecc517d25.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e0460d38257bbaf296e8d6953bea5b53f71ce2831a20cd4b045ce0b9172d465 +size 1533 diff --git a/data/2025/2504_07xxx/2504.07957/images/746967d3754041f1cfef9cc64a9093ae1f7ea58de21ff7208c444192c75c151d.jpg b/data/2025/2504_07xxx/2504.07957/images/746967d3754041f1cfef9cc64a9093ae1f7ea58de21ff7208c444192c75c151d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..073b5b2b9aa59f2091fc62e0ccbe941d940c9f0d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/746967d3754041f1cfef9cc64a9093ae1f7ea58de21ff7208c444192c75c151d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d3f10a457e45990b3b4f0796aa13724f772e6a051ead9f42c549e91345a23af +size 243146 diff --git a/data/2025/2504_07xxx/2504.07957/images/75cfe8b804245d7779058c9694eb4ff6df96eb7847bb5c12936a31f85a800642.jpg b/data/2025/2504_07xxx/2504.07957/images/75cfe8b804245d7779058c9694eb4ff6df96eb7847bb5c12936a31f85a800642.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8513d7567fbab55862e8367216357f81d717980a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/75cfe8b804245d7779058c9694eb4ff6df96eb7847bb5c12936a31f85a800642.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0ce4a6c7c2a7dfd14f515616b37361ce03193ae14da2cfc7cdfc78cfb96d99f1 +size 1899 diff --git a/data/2025/2504_07xxx/2504.07957/images/779b73debc619571e7701ceb41cc0821a65f6b6bae44f37cd947132bc6dea8dd.jpg b/data/2025/2504_07xxx/2504.07957/images/779b73debc619571e7701ceb41cc0821a65f6b6bae44f37cd947132bc6dea8dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85a3ffc2e511adbfdad544033aff3d2221ceca57 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/779b73debc619571e7701ceb41cc0821a65f6b6bae44f37cd947132bc6dea8dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9b24e2e8217781ad1d1d7c66e021f2cc7aa74d6c78c84c0c7bdb14661f6ebd5 +size 64118 diff --git a/data/2025/2504_07xxx/2504.07957/images/782f241997aa01eaabff7691aa2404853a7d7988ff1031db0803c4d8801ec1d3.jpg b/data/2025/2504_07xxx/2504.07957/images/782f241997aa01eaabff7691aa2404853a7d7988ff1031db0803c4d8801ec1d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30de6725f267c3ba6074d1aac86a87c21152507c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/782f241997aa01eaabff7691aa2404853a7d7988ff1031db0803c4d8801ec1d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5cc172c8eccc9de11d8415348b2a8b7ca87fd7e45bb9b351fe8fed992509b77 +size 1273 diff --git a/data/2025/2504_07xxx/2504.07957/images/7acea6432fb9022b5a33535d982ef3fcc5554b2dcf8c5fcd664b83946350ee55.jpg b/data/2025/2504_07xxx/2504.07957/images/7acea6432fb9022b5a33535d982ef3fcc5554b2dcf8c5fcd664b83946350ee55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..67a258c9eb650d0cfcb1b628e408606b2d0fb061 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/7acea6432fb9022b5a33535d982ef3fcc5554b2dcf8c5fcd664b83946350ee55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d952228467c1cfa2142a50bf41f954f861895e8fd58bf7927528b3e937e6253e +size 933 diff --git 
a/data/2025/2504_07xxx/2504.07957/images/7b82cca0e2400d6ba62d0d7b0cc2be5197f5fe5e94ded82d13f2a8c8d879f3ba.jpg b/data/2025/2504_07xxx/2504.07957/images/7b82cca0e2400d6ba62d0d7b0cc2be5197f5fe5e94ded82d13f2a8c8d879f3ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cde529400ad7e1593b175bbf01538aaadf1d9516 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/7b82cca0e2400d6ba62d0d7b0cc2be5197f5fe5e94ded82d13f2a8c8d879f3ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ceb5fe6f5eca83c6334b9af5dceb73b9ca20217a31fb5a6b37e80469b0380e3a +size 27548 diff --git a/data/2025/2504_07xxx/2504.07957/images/7bf578d20e9149264d8eeba49452b97b318a01bb095a6f55866f3d420ba12a00.jpg b/data/2025/2504_07xxx/2504.07957/images/7bf578d20e9149264d8eeba49452b97b318a01bb095a6f55866f3d420ba12a00.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4246869b0dfb103d7c592ff0526c1dffc942100 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/7bf578d20e9149264d8eeba49452b97b318a01bb095a6f55866f3d420ba12a00.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce008086733fd1cbd5a3a7f7cf7e20af40183f89afa5f555d800ff2375ed0391 +size 1696 diff --git a/data/2025/2504_07xxx/2504.07957/images/7d4bda3009acb1f7beb7ea117256abba0234caba3ad6dd0ed42754e5c74ba40c.jpg b/data/2025/2504_07xxx/2504.07957/images/7d4bda3009acb1f7beb7ea117256abba0234caba3ad6dd0ed42754e5c74ba40c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a65c3760941fb64e5b3da9172ef9bff45ae3ed00 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/7d4bda3009acb1f7beb7ea117256abba0234caba3ad6dd0ed42754e5c74ba40c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ac63d514b96dbf320399ebc07ee1d8d24a9ed3a6cd3951f65142d57ee733874 +size 102131 diff --git a/data/2025/2504_07xxx/2504.07957/images/7d566bf817e91560624919137435e2e54084d3f343d7e69395cddc8f69c2ba35.jpg 
b/data/2025/2504_07xxx/2504.07957/images/7d566bf817e91560624919137435e2e54084d3f343d7e69395cddc8f69c2ba35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92b797cd89d94018b47a1aebb4834b88351bc1d9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/7d566bf817e91560624919137435e2e54084d3f343d7e69395cddc8f69c2ba35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61186db1e0ef7a9cb53af1a596ec6fd7254e182b80b2e042b3fecc834693af60 +size 70487 diff --git a/data/2025/2504_07xxx/2504.07957/images/7dc04617a68fbd011d6e2e59205e61d13f5fe74159dcab09d05f33af5f01fd78.jpg b/data/2025/2504_07xxx/2504.07957/images/7dc04617a68fbd011d6e2e59205e61d13f5fe74159dcab09d05f33af5f01fd78.jpg new file mode 100644 index 0000000000000000000000000000000000000000..804e0b407cc9aed0b7b947edb8cdbf8e25e049a3 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/7dc04617a68fbd011d6e2e59205e61d13f5fe74159dcab09d05f33af5f01fd78.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11cc2dc1e603ae79225ef71059efff624af5aa6857733a70b7db35dd4cd290ee +size 782 diff --git a/data/2025/2504_07xxx/2504.07957/images/7fc02214dfd29a49639cbccdd247b22267c5b82662bcdcc0f061ab82dc9c141f.jpg b/data/2025/2504_07xxx/2504.07957/images/7fc02214dfd29a49639cbccdd247b22267c5b82662bcdcc0f061ab82dc9c141f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91c50e9ccfe04f1c393536cdbd7b3463318dbb19 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/7fc02214dfd29a49639cbccdd247b22267c5b82662bcdcc0f061ab82dc9c141f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfec2da96818f2bc73fab1e81ce087b76877ee267c33c847a2311d3eaf52caf1 +size 21410 diff --git a/data/2025/2504_07xxx/2504.07957/images/811b4f1336891501d9f68e8fa390c7b31be3c17edd73089b93c5ac3750611e0a.jpg b/data/2025/2504_07xxx/2504.07957/images/811b4f1336891501d9f68e8fa390c7b31be3c17edd73089b93c5ac3750611e0a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..dedff22ccc753d6fe83d9fc2923d2cff6125dde2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/811b4f1336891501d9f68e8fa390c7b31be3c17edd73089b93c5ac3750611e0a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:568c7df3a32eaf119f0c21941375067cf30b22c2c9b7319dcba6f5beb74a9c56 +size 1442 diff --git a/data/2025/2504_07xxx/2504.07957/images/907f08c10d6e79f5c38cd4e49277e1c617858900c1802711181bddafab3df592.jpg b/data/2025/2504_07xxx/2504.07957/images/907f08c10d6e79f5c38cd4e49277e1c617858900c1802711181bddafab3df592.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1998b66d7a930c88fde5bde0879a07706b8ceed9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/907f08c10d6e79f5c38cd4e49277e1c617858900c1802711181bddafab3df592.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac3f265e57a4bb0631e1fb0f03c9f0db220da9ee00da5aa1ade2eea3e443c244 +size 2840 diff --git a/data/2025/2504_07xxx/2504.07957/images/93fae1c89257852bfbbe9a0a5b659cc8ae540c6562a41118c4f8fcb933ee185c.jpg b/data/2025/2504_07xxx/2504.07957/images/93fae1c89257852bfbbe9a0a5b659cc8ae540c6562a41118c4f8fcb933ee185c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab9479348817ce01d117cd1777f2fa3e40267d34 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/93fae1c89257852bfbbe9a0a5b659cc8ae540c6562a41118c4f8fcb933ee185c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dab2aa4fe17cda4f29eba647bff6fc84717464d257e438eda4b6994e4b3fc7a +size 21599 diff --git a/data/2025/2504_07xxx/2504.07957/images/9482868896c9f2e21df0a05a6e6a138d23d6f7c7a0877327bcb605e2309fee18.jpg b/data/2025/2504_07xxx/2504.07957/images/9482868896c9f2e21df0a05a6e6a138d23d6f7c7a0877327bcb605e2309fee18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7fcb579278e5b37fd9710e75a6bca0b4795bad53 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07957/images/9482868896c9f2e21df0a05a6e6a138d23d6f7c7a0877327bcb605e2309fee18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bc680bb212b6c5d732429569b437f799b0d4f1108ab0360f4f989cfe14c47cf +size 1527 diff --git a/data/2025/2504_07xxx/2504.07957/images/9869dc9133a474107a7d86c90dc56f607a78d7c877ecf631366616bdfd7852f9.jpg b/data/2025/2504_07xxx/2504.07957/images/9869dc9133a474107a7d86c90dc56f607a78d7c877ecf631366616bdfd7852f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2df808a7a3dbacd9d740dd9751091052beb9db3c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/9869dc9133a474107a7d86c90dc56f607a78d7c877ecf631366616bdfd7852f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:641c92968bba4127b17532fa8f04b1c84b93edd55ff2b195872796e7e878315a +size 275683 diff --git a/data/2025/2504_07xxx/2504.07957/images/9bc358e345c9b98ea3fb38fc86ce45afa38fa2fcdfcb276c74e120a498986945.jpg b/data/2025/2504_07xxx/2504.07957/images/9bc358e345c9b98ea3fb38fc86ce45afa38fa2fcdfcb276c74e120a498986945.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78544ef501bf5093db1daf6afd78925ef7c9b47c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/9bc358e345c9b98ea3fb38fc86ce45afa38fa2fcdfcb276c74e120a498986945.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:972dc067a39bcb6280f8c8f06aa67bf235645a9912e03fc5bb65de0a7b75d7ac +size 1578 diff --git a/data/2025/2504_07xxx/2504.07957/images/a0eaad4b1104e3743ad2ff61115c055cb525b491b47d3414f47134eaa66179b5.jpg b/data/2025/2504_07xxx/2504.07957/images/a0eaad4b1104e3743ad2ff61115c055cb525b491b47d3414f47134eaa66179b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a09a7206e462d2513aaf5dd12b918475b7f3ac84 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/a0eaad4b1104e3743ad2ff61115c055cb525b491b47d3414f47134eaa66179b5.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e26027b6c961e4a4d65f7b2bbb619fbde885ac5dec6ac3600fa4283015b1ecce +size 159873 diff --git a/data/2025/2504_07xxx/2504.07957/images/a378f0667b9cc7382a9e9850a7ce0d7cf5d50edbb696014f7c83556a3823502a.jpg b/data/2025/2504_07xxx/2504.07957/images/a378f0667b9cc7382a9e9850a7ce0d7cf5d50edbb696014f7c83556a3823502a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..697098247d306a3a693e8f24cc1aff0463a1b391 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/a378f0667b9cc7382a9e9850a7ce0d7cf5d50edbb696014f7c83556a3823502a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8113fad5e23cb2760e4e50597714e23f981a68c63f36442cb3d2ce9ea7bac000 +size 9060 diff --git a/data/2025/2504_07xxx/2504.07957/images/a3c586654cc1fd75bb35dbd94a5d9a308b95eadf05ded748ca85608e32953d6f.jpg b/data/2025/2504_07xxx/2504.07957/images/a3c586654cc1fd75bb35dbd94a5d9a308b95eadf05ded748ca85608e32953d6f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56fb21e5ec1ae7cd69e762f226332115f4d57d54 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/a3c586654cc1fd75bb35dbd94a5d9a308b95eadf05ded748ca85608e32953d6f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca28d6989520dca6875c3fba39049f4cc8e03137aad94a01e632a1e4889b0e38 +size 1071 diff --git a/data/2025/2504_07xxx/2504.07957/images/a7b47c86c0f216460b617263eee211805158176d0cb11d14401cc8c47056be8d.jpg b/data/2025/2504_07xxx/2504.07957/images/a7b47c86c0f216460b617263eee211805158176d0cb11d14401cc8c47056be8d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc5e6a7e0d96df42002c770e5fc6d83d1a7169af --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/a7b47c86c0f216460b617263eee211805158176d0cb11d14401cc8c47056be8d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b81df6ee2d03e0ba82887e86c4cb79d917cfd44246923f5cb8431b8843b358e4 +size 1429 diff --git 
a/data/2025/2504_07xxx/2504.07957/images/a893f351b5971c3de588c7867fd7f15eec8696742e9e1f11d2c17a7a3e056f51.jpg b/data/2025/2504_07xxx/2504.07957/images/a893f351b5971c3de588c7867fd7f15eec8696742e9e1f11d2c17a7a3e056f51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..90706c4c341b98b385d46356d4276453722a1ca1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/a893f351b5971c3de588c7867fd7f15eec8696742e9e1f11d2c17a7a3e056f51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6808714889b57630e244e82badbd97d702ab5b091827fd0d9e00c723799cd021 +size 1389 diff --git a/data/2025/2504_07xxx/2504.07957/images/b27cab99c2ed7531296bfc443c19e4215ad0b0f4f75480717906d223b26bd8b5.jpg b/data/2025/2504_07xxx/2504.07957/images/b27cab99c2ed7531296bfc443c19e4215ad0b0f4f75480717906d223b26bd8b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..afe278cd1728fe7093f5634259312c12f820bafe --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/b27cab99c2ed7531296bfc443c19e4215ad0b0f4f75480717906d223b26bd8b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c61d988932aae63990afb6b1acbc5b9e0b406fbbdb7567fe1edf0404d8683f9 +size 1481 diff --git a/data/2025/2504_07xxx/2504.07957/images/b3c9eac6671fe2b8119bc68e7872593103ecc8f2f6902e4917136ee6093c119a.jpg b/data/2025/2504_07xxx/2504.07957/images/b3c9eac6671fe2b8119bc68e7872593103ecc8f2f6902e4917136ee6093c119a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01b7ef63bd35b1e7c7b73352a5b72d76151b2196 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/b3c9eac6671fe2b8119bc68e7872593103ecc8f2f6902e4917136ee6093c119a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4faadfb7a1d5d58e6caafb96e22d63fe54e803bb468f1b5ad31df85faf33225 +size 1279 diff --git a/data/2025/2504_07xxx/2504.07957/images/b3f4f0a77e2e8fb439f7a5a336c51d6a3b18c90d4e3d1baf3fdc99633a509a39.jpg 
b/data/2025/2504_07xxx/2504.07957/images/b3f4f0a77e2e8fb439f7a5a336c51d6a3b18c90d4e3d1baf3fdc99633a509a39.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f6b852d6344528a004cfd280969c2fbf709fe3f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/b3f4f0a77e2e8fb439f7a5a336c51d6a3b18c90d4e3d1baf3fdc99633a509a39.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a26b001508a10bee768ef96609a09c94a5abb6cf0b56b281783e64847b2893c4 +size 3265 diff --git a/data/2025/2504_07xxx/2504.07957/images/b8a348f6987610ae0c95cad2a4e302007efae81f1c69689c59abf0b69f401b43.jpg b/data/2025/2504_07xxx/2504.07957/images/b8a348f6987610ae0c95cad2a4e302007efae81f1c69689c59abf0b69f401b43.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d438cd17ba3834661fa09a06fb1f83a51b5a7d11 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/b8a348f6987610ae0c95cad2a4e302007efae81f1c69689c59abf0b69f401b43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e80c83d9cd491a75cda6ac4235fdc871337354deefcafebed71cdaa2331ba66 +size 717 diff --git a/data/2025/2504_07xxx/2504.07957/images/bb4d03c21b188b7e6e948b05b0228250d985286a8306dc23cf84eb15360ad44c.jpg b/data/2025/2504_07xxx/2504.07957/images/bb4d03c21b188b7e6e948b05b0228250d985286a8306dc23cf84eb15360ad44c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce5b8f5bbc8f68e894845bdc479bbcd3571fb996 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/bb4d03c21b188b7e6e948b05b0228250d985286a8306dc23cf84eb15360ad44c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e17bb2f831f69926061e6e6b14469f086b8eefd1f2aeb4040d4cb65e06719d5 +size 14985 diff --git a/data/2025/2504_07xxx/2504.07957/images/bc9dded61ccb3724cf7534e635cf47797e703693d01074c8691e6062ff4b1053.jpg b/data/2025/2504_07xxx/2504.07957/images/bc9dded61ccb3724cf7534e635cf47797e703693d01074c8691e6062ff4b1053.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..c5e399183a60197a1943a86dad64a7c6e1ab7ec5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/bc9dded61ccb3724cf7534e635cf47797e703693d01074c8691e6062ff4b1053.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41537ddbdf083bf901e85a4c09e7b577afe83d90985cf954d96032f87147f9f8 +size 1723 diff --git a/data/2025/2504_07xxx/2504.07957/images/bdd9fcab21c588a102cee9f5b639a91bcdb87ca0f7f02f68691766d792155ca6.jpg b/data/2025/2504_07xxx/2504.07957/images/bdd9fcab21c588a102cee9f5b639a91bcdb87ca0f7f02f68691766d792155ca6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43621742e43ab660a442bac762be117cfabff500 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/bdd9fcab21c588a102cee9f5b639a91bcdb87ca0f7f02f68691766d792155ca6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f44e27b84d9b9d2585976409c47e8a132c151f07a136fca2e4c353238c05bb3 +size 1549 diff --git a/data/2025/2504_07xxx/2504.07957/images/c591eb706a2d3bf120b3452026a44ca6fc90941ddf2eea9b2d4a992aafbd80ed.jpg b/data/2025/2504_07xxx/2504.07957/images/c591eb706a2d3bf120b3452026a44ca6fc90941ddf2eea9b2d4a992aafbd80ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a75ad241d90e5b07731719bbded6e31937918d02 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/c591eb706a2d3bf120b3452026a44ca6fc90941ddf2eea9b2d4a992aafbd80ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed2405b6b9550a3a08cd8a54995ce15425c8c6a613ddcfee9c9f0bfde867199d +size 5472 diff --git a/data/2025/2504_07xxx/2504.07957/images/ca77698c74d5a156981b000cf49d67605324d4995510c0e9d28ed9e134c00878.jpg b/data/2025/2504_07xxx/2504.07957/images/ca77698c74d5a156981b000cf49d67605324d4995510c0e9d28ed9e134c00878.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b995f3c883cb67e7a7cbb006719d741d2f31a1fc --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07957/images/ca77698c74d5a156981b000cf49d67605324d4995510c0e9d28ed9e134c00878.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a43c156e5c1e9e783436c91633d41bd9ecfbf9b15500b146480742421f6171a6 +size 1844 diff --git a/data/2025/2504_07xxx/2504.07957/images/d43f9962e8b33f145891928a51d28b11929c9878a53feea2d7123d86ff22bd5e.jpg b/data/2025/2504_07xxx/2504.07957/images/d43f9962e8b33f145891928a51d28b11929c9878a53feea2d7123d86ff22bd5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..311efa4a01e3b11a53f34ba6b8563bfb7d79ba5e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/d43f9962e8b33f145891928a51d28b11929c9878a53feea2d7123d86ff22bd5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f190ae14841c36a9ed3ab9255ef4d4733222621809ff46acd495ca82343e0a34 +size 2366 diff --git a/data/2025/2504_07xxx/2504.07957/images/d574d081608edd8f054ab18944cd1e36be7d96c5ca67d720264cf861d25c4a53.jpg b/data/2025/2504_07xxx/2504.07957/images/d574d081608edd8f054ab18944cd1e36be7d96c5ca67d720264cf861d25c4a53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae7d975024ebfdd5b79ccb794e7f76f26eaf2cc9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/d574d081608edd8f054ab18944cd1e36be7d96c5ca67d720264cf861d25c4a53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15b4d9c702135e54fd6c706187b24ead305db24f233058fe3177ba123d262075 +size 4986 diff --git a/data/2025/2504_07xxx/2504.07957/images/d5913805078e9758b3b084021463a8066cf52a8f5c88938099c0eb7aedea580d.jpg b/data/2025/2504_07xxx/2504.07957/images/d5913805078e9758b3b084021463a8066cf52a8f5c88938099c0eb7aedea580d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9342c60d4a0fb4025223c498041b5446fcd78a60 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/d5913805078e9758b3b084021463a8066cf52a8f5c88938099c0eb7aedea580d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:01e6f183e86bfe6633748f596affdf57431e94ca7d3f8fdd196875599aa8b87b +size 14452 diff --git a/data/2025/2504_07xxx/2504.07957/images/da1c12581fdf4703106d135a0c117b8491e0a5df760368394ef31e1010533d9d.jpg b/data/2025/2504_07xxx/2504.07957/images/da1c12581fdf4703106d135a0c117b8491e0a5df760368394ef31e1010533d9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..994c393b3317f62be1c580da2c65f425cb22c4c6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/da1c12581fdf4703106d135a0c117b8491e0a5df760368394ef31e1010533d9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:929cd77b0f2bd75e6d6695b73ee35477f76b48de4b9167d0f01b9edc4fb1b1b0 +size 16794 diff --git a/data/2025/2504_07xxx/2504.07957/images/e82b943d134af69e1fe089952dc36d51fc81bd96571d3c00377c3b8f701a9907.jpg b/data/2025/2504_07xxx/2504.07957/images/e82b943d134af69e1fe089952dc36d51fc81bd96571d3c00377c3b8f701a9907.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f3595a9f5900bcbd0cc7f574fc81d221b54dca0 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/e82b943d134af69e1fe089952dc36d51fc81bd96571d3c00377c3b8f701a9907.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e381494e0ebf78eae872d4268643f47a720b4dddcc5ac1593b80851cf602a1e2 +size 95462 diff --git a/data/2025/2504_07xxx/2504.07957/images/e90dbd5a52e6d099a2eb2d93609c0aa8ceee4aed0b36fcf9264cf178892f3f49.jpg b/data/2025/2504_07xxx/2504.07957/images/e90dbd5a52e6d099a2eb2d93609c0aa8ceee4aed0b36fcf9264cf178892f3f49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e2ea9a542f5d16286d6d9db07a6618a679ff9d4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/e90dbd5a52e6d099a2eb2d93609c0aa8ceee4aed0b36fcf9264cf178892f3f49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1dfa6697e3c136fa6473a3e8cf808797292031487f22d1514121ef2ad083c824 +size 19329 diff --git 
a/data/2025/2504_07xxx/2504.07957/images/ea7d98cf5d46a87b51dcd4991ba87ca475604f3db9d6d6ae7d4dcf55faa82418.jpg b/data/2025/2504_07xxx/2504.07957/images/ea7d98cf5d46a87b51dcd4991ba87ca475604f3db9d6d6ae7d4dcf55faa82418.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78cead0a44df17da6729a1fb48600401416957e4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/ea7d98cf5d46a87b51dcd4991ba87ca475604f3db9d6d6ae7d4dcf55faa82418.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98df32e31c24ea6298fb38cc3ef54e2f0ac83a6bb9ec7c868416539d1642c2a6 +size 21593 diff --git a/data/2025/2504_07xxx/2504.07957/images/ec10135043baf7b83bf204b85b6f92a302788debf4131724f2feac72291fb4ab.jpg b/data/2025/2504_07xxx/2504.07957/images/ec10135043baf7b83bf204b85b6f92a302788debf4131724f2feac72291fb4ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..141502e7e9fabfe8ba82facaa9026257ee0d5c59 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/ec10135043baf7b83bf204b85b6f92a302788debf4131724f2feac72291fb4ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91efee1f82a5dc2c738a21855ddf9160b7637dde2c1dc9a92ed6de241372922c +size 802 diff --git a/data/2025/2504_07xxx/2504.07957/images/ede6d71a7e6a40c3e16bae59a649dada191f0d2c2d2d6a06b20011e6e74d99a2.jpg b/data/2025/2504_07xxx/2504.07957/images/ede6d71a7e6a40c3e16bae59a649dada191f0d2c2d2d6a06b20011e6e74d99a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6446b0fb1ab7bdd0f618e4251d537a04c2f93506 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/ede6d71a7e6a40c3e16bae59a649dada191f0d2c2d2d6a06b20011e6e74d99a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5df78413e35de89be28ebf2d95591d0199ec7d402327d41fa27024b75f7de584 +size 2388 diff --git a/data/2025/2504_07xxx/2504.07957/images/ee039d219f172204d34b079a06e82c4fb8fe851e2819d7c69e5891a8db8ae899.jpg 
b/data/2025/2504_07xxx/2504.07957/images/ee039d219f172204d34b079a06e82c4fb8fe851e2819d7c69e5891a8db8ae899.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e0cff3c4be1e8bb7e55e42bf970b2463b41d534 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/ee039d219f172204d34b079a06e82c4fb8fe851e2819d7c69e5891a8db8ae899.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:345df083daaf0115f9b2d3e848080852185845e237a6805989ba104d6169f136 +size 4279 diff --git a/data/2025/2504_07xxx/2504.07957/images/ef4afe7e62cd13d79634e9a4ebc580c21f65027704dad58fa5b37a5c231d52d6.jpg b/data/2025/2504_07xxx/2504.07957/images/ef4afe7e62cd13d79634e9a4ebc580c21f65027704dad58fa5b37a5c231d52d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5447c919fe9739aaa2395ef22d1361836ba14c53 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/ef4afe7e62cd13d79634e9a4ebc580c21f65027704dad58fa5b37a5c231d52d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff155499a495447558b0623e086cb430ebde2b0a9e7d4dbb35d12c3f63a9f4c6 +size 5735 diff --git a/data/2025/2504_07xxx/2504.07957/images/f11a13a7625ccb9fecf6ab27dca9dc1ff2f4e8e98d758891aaf3acc5e1041833.jpg b/data/2025/2504_07xxx/2504.07957/images/f11a13a7625ccb9fecf6ab27dca9dc1ff2f4e8e98d758891aaf3acc5e1041833.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8614ceda50e0aa98046882e88d985d550d292792 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/f11a13a7625ccb9fecf6ab27dca9dc1ff2f4e8e98d758891aaf3acc5e1041833.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89bc20e81125a45f957a88c71b8bf86914611f0d4b7e44ba0061e14a2992550a +size 2198 diff --git a/data/2025/2504_07xxx/2504.07957/images/f621fd1a958e226377c6ec8dd2d58c6be49f6c28ae8e82474a52930ee148192d.jpg b/data/2025/2504_07xxx/2504.07957/images/f621fd1a958e226377c6ec8dd2d58c6be49f6c28ae8e82474a52930ee148192d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4dce16baaae4be1633cb5c979e1fe7a2df58dc13 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/f621fd1a958e226377c6ec8dd2d58c6be49f6c28ae8e82474a52930ee148192d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9463c18386bdad0f08b1a8deb3cadf861a8a08acd2f9e4d742ade79af679fd8 +size 1023 diff --git a/data/2025/2504_07xxx/2504.07957/images/f6b38cfc3886ea51093755f3f7be41388885e1728923505cc87d1cdc62a2637a.jpg b/data/2025/2504_07xxx/2504.07957/images/f6b38cfc3886ea51093755f3f7be41388885e1728923505cc87d1cdc62a2637a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..598494b343827a27f6ef5900d008fc245a2486f2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/f6b38cfc3886ea51093755f3f7be41388885e1728923505cc87d1cdc62a2637a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c0e6ba8b2ddfbaf3b2d4922583b83f75fb8961def950f3c3b3a5a76418979b0 +size 1611 diff --git a/data/2025/2504_07xxx/2504.07957/images/f85c35d98213bdd15cfb04c35d6896020e317749ab4d75ff2de2cf0f5000a581.jpg b/data/2025/2504_07xxx/2504.07957/images/f85c35d98213bdd15cfb04c35d6896020e317749ab4d75ff2de2cf0f5000a581.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cf21b644003c9a6b5a379165eab4ae96f61348f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/f85c35d98213bdd15cfb04c35d6896020e317749ab4d75ff2de2cf0f5000a581.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e3a890f70b1b634f52fb94f5c98a46888d4f04f21dc50946facc050c15b099d +size 2542 diff --git a/data/2025/2504_07xxx/2504.07957/images/f8b7a3511b4b56a4319ce0f5af8de1c97cdad29b7f56da0d870eccc27e2792ee.jpg b/data/2025/2504_07xxx/2504.07957/images/f8b7a3511b4b56a4319ce0f5af8de1c97cdad29b7f56da0d870eccc27e2792ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d2e726594cbdab4a1c32d84b1a47f3356f01042 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07957/images/f8b7a3511b4b56a4319ce0f5af8de1c97cdad29b7f56da0d870eccc27e2792ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1e57b593a874d20c3f064d2d0d36b9958d56674d48a0f23cfeadc73933583e9 +size 25270 diff --git a/data/2025/2504_07xxx/2504.07957/images/fa246e78d5d86cd9b166edad5795dce6faf8bcadbcdfe8b5d4f19f4f8d147359.jpg b/data/2025/2504_07xxx/2504.07957/images/fa246e78d5d86cd9b166edad5795dce6faf8bcadbcdfe8b5d4f19f4f8d147359.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8f4472f8b6372b2d789e1f3a9ae176c00c31481 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/images/fa246e78d5d86cd9b166edad5795dce6faf8bcadbcdfe8b5d4f19f4f8d147359.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cd5e1fcb87f951c7a1583e59579a2fa8cc315fa34381e68ec53b036ed70d8fd +size 1800 diff --git a/data/2025/2504_07xxx/2504.07957/layout.json b/data/2025/2504_07xxx/2504.07957/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1dd1e30bc3f9ea88d961ee5ec450252943ff639b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07957/layout.json @@ -0,0 +1,19960 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 122, + 103, + 489, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 103, + 489, + 121 + ], + "spans": [ + { + "bbox": [ + 122, + 103, + 489, + 121 + ], + "type": "text", + "content": "MM-IFEngine: Towards Multimodal Instruction Following" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "spans": [ + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": "Shengyuan Ding" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": ", Shenxi Wu" + }, + { + 
"bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": ", Xiangyu Zhao" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{2,3}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": ", Yuhang Zang" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{2\\boxtimes}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": ", Haodong Duan" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": ", Xiaoyi Dong" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": ", Pan Zhang" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": ", Yuhang Cao" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": ", Dahua Lin" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{2,4,5}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": ", Jiaqi Wang" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{2,6\\boxtimes}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": "Fudan University " + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": 
"inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": "Shanghai AI Laboratory " + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": "Shanghai Jiaotong University " + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": "The Chinese University of Hong Kong " + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": "CPII under InnoHK " + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 73, + 141, + 536, + 200 + ], + "type": "text", + "content": "Shanghai Innovation Institute" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 60, + 228, + 165, + 381 + ], + "blocks": [ + { + "bbox": [ + 63, + 215, + 157, + 225 + ], + "lines": [ + { + "bbox": [ + 63, + 215, + 157, + 225 + ], + "spans": [ + { + "bbox": [ + 63, + 215, + 157, + 225 + ], + "type": "text", + "content": "(a) Current MMIF Bench" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 60, + 228, + 165, + 381 + ], + "lines": [ + { + "bbox": [ + 60, + 228, + 165, + 381 + ], + "spans": [ + { + "bbox": [ + 60, + 228, + 165, + 381 + ], + "type": "image", + "image_path": "f8b7a3511b4b56a4319ce0f5af8de1c97cdad29b7f56da0d870eccc27e2792ee.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 365, + 284, + 376 + ], + "lines": [ + { + "bbox": [ + 175, + 365, + 284, + 376 + ], + "spans": [ + { + "bbox": [ + 175, + 365, + 284, + 376 + ], + "type": "text", + "content": "1. 
Answer as if you are facing to the audience. \n2. Use No more than 60 words...." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 392, + 555, + 448 + ], + "lines": [ + { + "bbox": [ + 55, + 392, + 555, + 448 + ], + "spans": [ + { + "bbox": [ + 55, + 392, + 555, + 448 + ], + "type": "text", + "content": "Figure 1. (a) Limitations of existing Multimodal Instruction Following (IF) benchmarks. (b) Overview of the MM-IFEval benchmark, which significantly surpasses existing benchmarks in terms of constraint diversity, quantity, and instruction complexity. Our benchmark consists of Compose-Level (C-Level) problems that impose constraints on model outputs (e.g., format requirements, keyword limits) and Perception-Level (P-Level) problems that require reasoning about specific visual elements in images. (c) Our MM-IFEngine generates a large-scale, diverse training dataset suitable for both Supervised Fine-Tuning (SFT) and Direct Preference Optimization (DPO)." 
+ } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 170, + 217, + 214, + 244 + ], + "blocks": [ + { + "bbox": [ + 170, + 217, + 214, + 244 + ], + "lines": [ + { + "bbox": [ + 170, + 217, + 214, + 244 + ], + "spans": [ + { + "bbox": [ + 170, + 217, + 214, + 244 + ], + "type": "image", + "image_path": "ee039d219f172204d34b079a06e82c4fb8fe851e2819d7c69e5891a8db8ae899.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 170, + 251, + 211, + 268 + ], + "lines": [ + { + "bbox": [ + 170, + 251, + 211, + 268 + ], + "spans": [ + { + "bbox": [ + 170, + 251, + 211, + 268 + ], + "type": "text", + "content": "Various & Abundant" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 216, + 217, + 304, + 273 + ], + "blocks": [ + { + "bbox": [ + 168, + 270, + 212, + 277 + ], + "lines": [ + { + "bbox": [ + 168, + 270, + 212, + 277 + ], + "spans": [ + { + "bbox": [ + 168, + 270, + 212, + 277 + ], + "type": "text", + "content": "Constraints" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 216, + 217, + 304, + 273 + ], + "lines": [ + { + "bbox": [ + 216, + 217, + 304, + 273 + ], + "spans": [ + { + "bbox": [ + 216, + 217, + 304, + 273 + ], + "type": "image", + "image_path": "a378f0667b9cc7382a9e9850a7ce0d7cf5d50edbb696014f7c83556a3823502a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 275, + 261, + 283 + ], + "lines": [ + { + "bbox": [ + 216, + 275, + 261, + 283 + ], + "spans": [ + { + "bbox": [ + 216, + 275, + 261, + 283 + ], + "type": "text", + "content": "MTA-Bench (About 1k constraints)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 247, + 277, + 296, + 284 + ], + "lines": [ + { + "bbox": [ + 247, + 277, + 296, + 284 + ], + 
"spans": [ + { + "bbox": [ + 247, + 277, + 296, + 284 + ], + "type": "text", + "content": "(300 questions)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 270, + 297, + 293, + 315 + ], + "blocks": [ + { + "bbox": [ + 242, + 289, + 356, + 300 + ], + "lines": [ + { + "bbox": [ + 242, + 289, + 356, + 300 + ], + "spans": [ + { + "bbox": [ + 242, + 289, + 356, + 300 + ], + "type": "text", + "content": "(b) MM-IFEval Benchmark" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 270, + 297, + 293, + 315 + ], + "lines": [ + { + "bbox": [ + 270, + 297, + 293, + 315 + ], + "spans": [ + { + "bbox": [ + 270, + 297, + 293, + 315 + ], + "type": "image", + "image_path": "a3c586654cc1fd75bb35dbd94a5d9a308b95eadf05ded748ca85608e32953d6f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 252, + 316, + 296, + 321 + ], + "lines": [ + { + "bbox": [ + 252, + 316, + 296, + 321 + ], + "spans": [ + { + "bbox": [ + 252, + 316, + 296, + 321 + ], + "type": "text", + "content": "follow instruction" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 235, + 322, + 295, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 322, + 295, + 354 + ], + "spans": [ + { + "bbox": [ + 235, + 322, + 295, + 354 + ], + "type": "text", + "content": "To Say \nyou are the musician \nimage. Write about your \ns and feelings while \ning." 
+ } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 171, + 312, + 208, + 350 + ], + "blocks": [ + { + "bbox": [ + 171, + 312, + 208, + 350 + ], + "lines": [ + { + "bbox": [ + 171, + 312, + 208, + 350 + ], + "spans": [ + { + "bbox": [ + 171, + 312, + 208, + 350 + ], + "type": "image", + "image_path": "1146acfca5d9086061b4a464479e45f75eb2dbde1aae95a753a84a6280d54ded.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 178, + 352, + 203, + 359 + ], + "lines": [ + { + "bbox": [ + 178, + 352, + 203, + 359 + ], + "spans": [ + { + "bbox": [ + 178, + 352, + 203, + 359 + ], + "type": "text", + "content": "Constraints" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 312, + 216, + 328, + 230 + ], + "blocks": [ + { + "bbox": [ + 312, + 216, + 328, + 230 + ], + "lines": [ + { + "bbox": [ + 312, + 216, + 328, + 230 + ], + "spans": [ + { + "bbox": [ + 312, + 216, + 328, + 230 + ], + "type": "image", + "image_path": "9482868896c9f2e21df0a05a6e6a138d23d6f7c7a0877327bcb605e2309fee18.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 330, + 217, + 433, + 228 + ], + "lines": [ + { + "bbox": [ + 330, + 217, + 433, + 228 + ], + "spans": [ + { + "bbox": [ + 330, + 217, + 433, + 228 + ], + "type": "text", + "content": "32 Categories of Constraints" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 330, + 234, + 419, + 245 + ], + "lines": [ + { + "bbox": [ + 330, + 234, + 419, + 245 + ], + "spans": [ + { + "bbox": [ + 330, + 234, + 419, + 245 + ], + "type": "text", + "content": "5.1 Average constraints" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 312, + 250, + 328, + 265 + ], + "blocks": [ + { + "bbox": [ + 312, + 250, + 328, + 265 + ], + "lines": [ + { + 
"bbox": [ + 312, + 250, + 328, + 265 + ], + "spans": [ + { + "bbox": [ + 312, + 250, + 328, + 265 + ], + "type": "image", + "image_path": "01829f3201f86c5241247f1c7856596385d02be6106a723acf901c1d6a6e9bde.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 330, + 253, + 433, + 264 + ], + "lines": [ + { + "bbox": [ + 330, + 253, + 433, + 264 + ], + "spans": [ + { + "bbox": [ + 330, + 253, + 433, + 264 + ], + "type": "text", + "content": "3 Evaluation metrics combined" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 312, + 270, + 328, + 284 + ], + "blocks": [ + { + "bbox": [ + 312, + 270, + 328, + 284 + ], + "lines": [ + { + "bbox": [ + 312, + 270, + 328, + 284 + ], + "spans": [ + { + "bbox": [ + 312, + 270, + 328, + 284 + ], + "type": "image", + "image_path": "9bc358e345c9b98ea3fb38fc86ce45afa38fa2fcdfcb276c74e120a498986945.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 363, + 285, + 384, + 297 + ], + "blocks": [ + { + "bbox": [ + 331, + 273, + 421, + 284 + ], + "lines": [ + { + "bbox": [ + 331, + 273, + 421, + 284 + ], + "spans": [ + { + "bbox": [ + 331, + 273, + 421, + 284 + ], + "type": "text", + "content": "400 high-quality samples" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 363, + 285, + 384, + 297 + ], + "lines": [ + { + "bbox": [ + 363, + 285, + 384, + 297 + ], + "spans": [ + { + "bbox": [ + 363, + 285, + 384, + 297 + ], + "type": "image", + "image_path": "b8a348f6987610ae0c95cad2a4e302007efae81f1c69689c59abf0b69f401b43.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 297, + 419, + 309 + ], + "lines": [ + { + "bbox": [ + 336, + 297, + 419, + 309 + ], + "spans": [ + { + "bbox": [ + 336, + 297, + 419, + 309 + ], + "type": "text", + 
"content": "100 Perception-Level" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 323, + 319, + 386, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 319, + 386, + 360 + ], + "spans": [ + { + "bbox": [ + 323, + 319, + 386, + 360 + ], + "type": "text", + "content": "To See letters can you identify covering the right half e poster? Output in from top to bottom and o right separated with '" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 309, + 366, + 375, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 366, + 375, + 376 + ], + "spans": [ + { + "bbox": [ + 309, + 366, + 375, + 376 + ], + "type": "text", + "content": "R,e,a,d,i,n,a,f,u" + } + ] + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 396, + 312, + 429, + 350 + ], + "blocks": [ + { + "bbox": [ + 363, + 312, + 389, + 318 + ], + "lines": [ + { + "bbox": [ + 363, + 312, + 389, + 318 + ], + "spans": [ + { + "bbox": [ + 363, + 312, + 389, + 318 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 396, + 312, + 429, + 350 + ], + "lines": [ + { + "bbox": [ + 396, + 312, + 429, + 350 + ], + "spans": [ + { + "bbox": [ + 396, + 312, + 429, + 350 + ], + "type": "image", + "image_path": "5d2246d97d8d1fe22d28c2487f238cb66c3ff7dafcb28d5e72d52ca7359ca660.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 355, + 427, + 361 + ], + "lines": [ + { + "bbox": [ + 397, + 355, + 427, + 361 + ], + "spans": [ + { + "bbox": [ + 397, + 355, + 427, + 361 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 443, + 243, + 510, + 284 + ], + "blocks": [ + { + "bbox": [ + 446, + 216, + 525, + 236 + ], + "lines": [ 
+ { + "bbox": [ + 446, + 216, + 525, + 236 + ], + "spans": [ + { + "bbox": [ + 446, + 216, + 525, + 236 + ], + "type": "text", + "content": "(c) MM-IF Dataset SFT & DPO" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 443, + 243, + 510, + 284 + ], + "lines": [ + { + "bbox": [ + 443, + 243, + 510, + 284 + ], + "spans": [ + { + "bbox": [ + 443, + 243, + 510, + 284 + ], + "type": "image", + "image_path": "c591eb706a2d3bf120b3452026a44ca6fc90941ddf2eea9b2d4a992aafbd80ed.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 511, + 244, + 538, + 265 + ], + "lines": [ + { + "bbox": [ + 511, + 244, + 538, + 265 + ], + "spans": [ + { + "bbox": [ + 511, + 244, + 538, + 265 + ], + "type": "text", + "content": "23kData generated with our MMTE Engine" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 520, + 266, + 538, + 285 + ], + "blocks": [ + { + "bbox": [ + 520, + 266, + 538, + 285 + ], + "lines": [ + { + "bbox": [ + 520, + 266, + 538, + 285 + ], + "spans": [ + { + "bbox": [ + 520, + 266, + 538, + 285 + ], + "type": "image", + "image_path": "75cfe8b804245d7779058c9694eb4ff6df96eb7847bb5c12936a31f85a800642.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 449, + 287, + 530, + 298 + ], + "lines": [ + { + "bbox": [ + 449, + 287, + 530, + 298 + ], + "spans": [ + { + "bbox": [ + 449, + 287, + 530, + 298 + ], + "type": "text", + "content": "Compose a brief poem inspired by the cozy and serene." 
+ } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 435, + 323, + 449, + 343 + ], + "blocks": [ + { + "bbox": [ + 449, + 299, + 535, + 315 + ], + "lines": [ + { + "bbox": [ + 449, + 299, + 535, + 315 + ], + "spans": [ + { + "bbox": [ + 449, + 299, + 535, + 315 + ], + "type": "text", + "content": "Each stanza should have 4 lines. Your output should include a metaphor" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 435, + 323, + 449, + 343 + ], + "lines": [ + { + "bbox": [ + 435, + 323, + 449, + 343 + ], + "spans": [ + { + "bbox": [ + 435, + 323, + 449, + 343 + ], + "type": "image", + "image_path": "811b4f1336891501d9f68e8fa390c7b31be3c17edd73089b93c5ac3750611e0a.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 322, + 534, + 346 + ], + "lines": [ + { + "bbox": [ + 452, + 322, + 534, + 346 + ], + "spans": [ + { + "bbox": [ + 452, + 322, + 534, + 346 + ], + "type": "text", + "content": "In a room where light gently plays, \nA haven carved from nature's ways. \nThe river whispers calm and clear, \nSerene as thoughts that banish fear..." 
+ } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 436, + 355, + 449, + 372 + ], + "blocks": [ + { + "bbox": [ + 436, + 355, + 449, + 372 + ], + "lines": [ + { + "bbox": [ + 436, + 355, + 449, + 372 + ], + "spans": [ + { + "bbox": [ + 436, + 355, + 449, + 372 + ], + "type": "image", + "image_path": "a893f351b5971c3de588c7867fd7f15eec8696742e9e1f11d2c17a7a3e056f51.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 350, + 522, + 361 + ], + "lines": [ + { + "bbox": [ + 454, + 350, + 522, + 361 + ], + "spans": [ + { + "bbox": [ + 454, + 350, + 522, + 361 + ], + "type": "text", + "content": "In a world of hustle and bustle \nA haven of peace and solitude." + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 455, + 367, + 529, + 379 + ], + "lines": [ + { + "bbox": [ + 455, + 367, + 529, + 379 + ], + "spans": [ + { + "bbox": [ + 455, + 367, + 529, + 379 + ], + "type": "text", + "content": "Soft curtains dance in the breeze. As the sun's rays gently caress." 
+ } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 537, + 327, + 549, + 340 + ], + "blocks": [ + { + "bbox": [ + 537, + 327, + 549, + 340 + ], + "lines": [ + { + "bbox": [ + 537, + 327, + 549, + 340 + ], + "spans": [ + { + "bbox": [ + 537, + 327, + 549, + 340 + ], + "type": "image", + "image_path": "782f241997aa01eaabff7691aa2404853a7d7988ff1031db0803c4d8801ec1d3.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 537, + 357, + 549, + 371 + ], + "blocks": [ + { + "bbox": [ + 537, + 357, + 549, + 371 + ], + "lines": [ + { + "bbox": [ + 537, + 357, + 549, + 371 + ], + "spans": [ + { + "bbox": [ + 537, + 357, + 549, + 371 + ], + "type": "image", + "image_path": "253ed87f97d5bd866869740de70181822344ed1736d653dd6b6e2f7568de7895.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + } + ], + "index": 43 + }, + { + "bbox": [ + 153, + 470, + 199, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 470, + 199, + 481 + ], + "spans": [ + { + "bbox": [ + 153, + 470, + 199, + 481 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 55, + 497, + 297, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 497, + 297, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 497, + 297, + 689 + ], + "type": "text", + "content": "The Instruction Following (IF) ability measures how well Multi-modal Large Language Models (MLLMs) understand exactly what users are telling them and whether they are doing it right. Existing multimodal instruction following training data is scarce, the benchmarks are simple with atomic instructions, and the evaluation strategies are imprecise for tasks demanding exact output constraints. 
To address this, we present MM-IFEngine, an effective pipeline to generate high-quality image-instruction pairs. Our MM-IFEngine pipeline yields large-scale, diverse, and high-quality training data MM-IFInstruct-23k, which is suitable for Supervised Fine-Tuning (SFT) and extended as MM-IFDPO-23k for Direct Preference Optimization (DPO). We further introduce MM-IFEval, a challenging and diverse multi-modal instruction-following benchmark that includes (1) both compose-level constraints for output re" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 313, + 460, + 556, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 460, + 556, + 579 + ], + "spans": [ + { + "bbox": [ + 313, + 460, + 556, + 579 + ], + "type": "text", + "content": "sponses and perception-level constraints tied to the input images, and (2) a comprehensive evaluation pipeline incorporating both rule-based assessment and judge model. We conduct SFT and DPO experiments and demonstrate that fine-tuning MLLMs on MM-IFInstruct-23k and MM-IFDPO-23k achieves notable gains on various IF benchmarks, such as MM-IFEval " + }, + { + "bbox": [ + 313, + 460, + 556, + 579 + ], + "type": "inline_equation", + "content": "(+10.2\\%)" + }, + { + "bbox": [ + 313, + 460, + 556, + 579 + ], + "type": "text", + "content": ", MIA " + }, + { + "bbox": [ + 313, + 460, + 556, + 579 + ], + "type": "inline_equation", + "content": "(+7.6\\%)" + }, + { + "bbox": [ + 313, + 460, + 556, + 579 + ], + "type": "text", + "content": ", and IFEval " + }, + { + "bbox": [ + 313, + 460, + 556, + 579 + ], + "type": "inline_equation", + "content": "(+12.3\\%)" + }, + { + "bbox": [ + 313, + 460, + 556, + 579 + ], + "type": "text", + "content": ". We have fully open-sourced the datasets (both SFT and DPO), evaluation code and training scripts at https://github.com/SYuan03/MM-IFEngine." 
+ } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 314, + 608, + 394, + 619 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 608, + 394, + 619 + ], + "spans": [ + { + "bbox": [ + 314, + 608, + 394, + 619 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 313, + 629, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 555, + 713 + ], + "type": "text", + "content": "Instruction Following (IF) is a fundamental ability in Large Language Models (LLMs) [14, 27, 35, 53, 57] and Multimodal Large Language Models (MLLMs) [2, 34], which involves accurately interpreting and executing user-provided instructions. This ability is crucial for deploying models in real-world applications where users expect precise and context-aware responses, such as code" + } + ] + } + ], + "index": 49 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 702, + 227, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 702, + 227, + 713 + ], + "spans": [ + { + "bbox": [ + 69, + 702, + 227, + 713 + ], + "type": "text", + "content": "* Equal contribution.☑ Corresponding authors." 
+ } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 14, + 209, + 36, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 36, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 36, + 559 + ], + "type": "text", + "content": "arXiv:2504.07957v2 [cs.CV] 27 Apr 2025" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 52 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 295, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 295, + 168 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 295, + 168 + ], + "type": "text", + "content": "generation [44], visual question answering [17], robots [38], and creative content creation [58]. For instance, in a VQA scenario, when a user asks an MLLM what is the object and how do I use it, return the object name and the usage instructions in a JSON format, accurate IF ensures the model provides a response like {object': 'hammer', 'usage': 'use it to drive nails'} instead of the plain text." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 170, + 298, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 170, + 298, + 373 + ], + "spans": [ + { + "bbox": [ + 56, + 170, + 298, + 373 + ], + "type": "text", + "content": "Achieving precise IF in multimodal, diverse, and open-ended environments presents significant challenges for both model training and benchmark evaluation. One significant limitation is the scarcity of high-quality IF training data to train open-source MLLMs. 
In addition, current multimodal IF benchmarks [2, 34] merely have simple, atomic instructions, and the constraints are weakly correlated with visual content (see Fig. 1 (a)). Consequently, existing benchmarks lack the diversity required for real-world applications, leading to saturated results where nearly all models achieve over " + }, + { + "bbox": [ + 56, + 170, + 298, + 373 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 56, + 170, + 298, + 373 + ], + "type": "text", + "content": ". Furthermore, the evaluation method in existing benchmarks often relies on LLM-as-a-judge [56], which is imprecise for instructions demanding exact output constraints, such as word counts. Therefore, the combination of limited training data, simple benchmarks, and imprecise evaluation strategy strongly restricts the progress of current MLLMs in IF." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 376, + 298, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 376, + 298, + 591 + ], + "spans": [ + { + "bbox": [ + 55, + 376, + 298, + 591 + ], + "type": "text", + "content": "To address the lack of high-quality IF training data and challenging benchmarks, we propose MM-IFEngine, an effective pipeline for generating high-quality image-instruction pairs. MM-IFEngine collects diverse image sources, including natural scenes, UI interfaces, diagrams, charts, and mathematical problems. We then employ a structured approach using a predefined set of 16 task descriptions and 32 constraints to guide the LLM in crafting tailored instructions for each image. Using MM-IFEngine, we generated a comprehensive dataset of image-instruction pairs, collected responses from open-source MLLMs, and applied rigorous post-processing to retain only high-quality instruction-answer pairs, thus constructing MM-IFInstruct-23k for Supervised Fine-Tuning (SFT). 
We also generate negative responses by selectively removing constraints from the original data, constructing the preference dataset MM-IFDPO-23k for preference optimization algorithms such as Direct Preference Optimization (DPO) [36]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 594, + 298, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 298, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 298, + 714 + ], + "type": "text", + "content": "To facilitate the evaluation of multimodal IF, we present MM-IFEval, a benchmark comprising 400 challenging problems with diverse compose-level and perception-level instructions. MM-IFEval is derived from the images and instructions generated by MM-IFEngine with human-labeled annotations. As presented in Fig. 1 (b), our MM-IFEval has the following three distinctive features: (1) Diverse Instruction Types: MM-IFEval has 32 distinct constraints, ensuring a wide range of instruction complexities and surpassing the scope of prior benchmarks. (2) Hybrid Evaluation: we use" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 72, + 556, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 556, + 203 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 556, + 203 + ], + "type": "text", + "content": "a hybrid strategy including both rule-based verification and judge model. For subjective instructions (e.g., mimicking tone), we design a comparative judgment for precise evaluation. Specifically, a control output is generated without the constraint, and the LLM judge compares both outputs for precise evaluation. 
(3) Challenging: the leading proprietary model (GPT-4o at " + }, + { + "bbox": [ + 313, + 72, + 556, + 203 + ], + "type": "inline_equation", + "content": "64.6\\%" + }, + { + "bbox": [ + 313, + 72, + 556, + 203 + ], + "type": "text", + "content": " ) and open-source model (Qwen2-VL-72B at " + }, + { + "bbox": [ + 313, + 72, + 556, + 203 + ], + "type": "inline_equation", + "content": "50.8\\%" + }, + { + "bbox": [ + 313, + 72, + 556, + 203 + ], + "type": "text", + "content": " ) demonstrating substantial room for improvement on our benchmark, highlights a significant opportunity for improvement in multimodal instruction following." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 204, + 556, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 204, + 556, + 312 + ], + "spans": [ + { + "bbox": [ + 313, + 204, + 556, + 312 + ], + "type": "text", + "content": "We further demonstrate that fine-tuning MLLMs on either MM-IFInstruct-23k or MM-IFDPO-23k consistently boosts the performance of MLLMs on instruction following benchmarks, without compromising their original capabilities on other Visual Question Answering (VQA) benchmarks. Specifically, fine-tuning Qwen2-VL-7B on MM-IFDPO-23k with the DPO results in performance gains of " + }, + { + "bbox": [ + 313, + 204, + 556, + 312 + ], + "type": "inline_equation", + "content": "10.2\\%" + }, + { + "bbox": [ + 313, + 204, + 556, + 312 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 204, + 556, + 312 + ], + "type": "inline_equation", + "content": "7.6\\%" + }, + { + "bbox": [ + 313, + 204, + 556, + 312 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 204, + 556, + 312 + ], + "type": "inline_equation", + "content": "12.3\\%" + }, + { + "bbox": [ + 313, + 204, + 556, + 312 + ], + "type": "text", + "content": " on MM-IFInstruct-23k, MIA-Bench [34], and IFEval [57], respectively." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 312, + 557, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 312, + 557, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 312, + 557, + 443 + ], + "type": "text", + "content": "Our contributions include: (1) a MM-IFEngine pipeline for generating multimodal constraint-rich image-instruction pairs; (2) a large-scale training dataset MM-IFInstruct-23k and preference optimization dataset MM-IFDPO-23k derived from MM-IFEngine; (3) a challenging multimodal instruction following benchmark MM-IFEval with diverse constraints and comprehensive evaluation approaches; and (4) empirical evidence showing significant performance gains on both our MM-IFEval and existing benchmarks when training MLLMs on MM-IFInstruct-23k via SFT and MM-IFDPO-23k via DPO." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 453, + 402, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 453, + 402, + 467 + ], + "spans": [ + { + "bbox": [ + 314, + 453, + 402, + 467 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 474, + 556, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 474, + 556, + 700 + ], + "spans": [ + { + "bbox": [ + 313, + 474, + 556, + 700 + ], + "type": "text", + "content": "Instruction Following in LLMs. Various benchmarks and training approaches have been proposed to make Large Language Models (LLMs) better align with human instructions. While existing Instruction Following (IF) benchmarks like [14, 35, 53, 57] all aim to evaluate instruction following, they differ significantly in their dataset construction pipelines, driven by their unique constraint taxonomies. CFBench [53], for instance, constructs its dataset using a combination of taxonomic and statistical methodologies to establish comprehensive constraints. 
This divergence extends to their evaluation strategies. For example, InFoBench [35] adopts a strategy of decomposing complex instructions into simpler assessment standards. Beyond benchmarks, various training approaches aim to enhance LLMs' instruction-following capabilities [29, 44], including in-context learning [58] and preference optimization [54]. However, he aforementioned research is limited to the text modality, whereas our work focuses on multi-modal instruction following with vision inputs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 701, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 701, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 701, + 556, + 713 + ], + "type": "text", + "content": "Instruction Following Benchmarks in MLLMs. Numerical" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 72, + 202, + 345 + ], + "blocks": [ + { + "bbox": [ + 63, + 72, + 202, + 345 + ], + "lines": [ + { + "bbox": [ + 63, + 72, + 202, + 345 + ], + "spans": [ + { + "bbox": [ + 63, + 72, + 202, + 345 + ], + "type": "image", + "image_path": "779b73debc619571e7701ceb41cc0821a65f6b6bae44f37cd947132bc6dea8dd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 358, + 555, + 415 + ], + "lines": [ + { + "bbox": [ + 55, + 358, + 555, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 358, + 555, + 415 + ], + "type": "text", + "content": "Figure 2. Overall pipeline of MM-IFEngine. 
Part (a) demonstrates the three-stage workflow of our engine: (1) Image filter; (2) Task generation using GPT-4o for images without QA pairs and instruct refinement for existing annotations; and (3) Constraints integration incorporating 6 main categories and 32 subcategories, ensuring compatibility between constraints and tasks. MM-IFEngine is employed to generate SFT and DPO training datasets and MM-IFEval benchmark, as shown in part (b) and (c). MM-IFEval implements three evaluation metrics combining rule-based verification functions and a judge model to ensure accurate assessment." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 205, + 72, + 369, + 346 + ], + "blocks": [ + { + "bbox": [ + 205, + 72, + 369, + 346 + ], + "lines": [ + { + "bbox": [ + 205, + 72, + 369, + 346 + ], + "spans": [ + { + "bbox": [ + 205, + 72, + 369, + 346 + ], + "type": "image", + "image_path": "22a8743138bb9705cbfaa1f460aa340a3dd2f922340969a4a75e4547935fad07.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 372, + 72, + 553, + 346 + ], + "blocks": [ + { + "bbox": [ + 372, + 72, + 553, + 346 + ], + "lines": [ + { + "bbox": [ + 372, + 72, + 553, + 346 + ], + "spans": [ + { + "bbox": [ + 372, + 72, + 553, + 346 + ], + "type": "image", + "image_path": "e82b943d134af69e1fe089952dc36d51fc81bd96571d3c00377c3b8f701a9907.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 435, + 297, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 435, + 297, + 590 + ], + "spans": [ + { + "bbox": [ + 54, + 435, + 297, + 590 + ], + "type": "text", + "content": "ous benchmarks [18] have been proposed to evaluate diverse capabilities of Multi-modal Large Language Models (MLLMs), including general knowledge [5, 24, 48, 50], document 
understanding [15, 25, 30], perception [43, 52], multi-image comprehension [26, 39, 40], and instruction following (IF) [2, 34]. MIA-Bench [34] and VisIT-Bench [2] are representative IF benchmarks that employ GPT-4 [32] for question generation and evaluation. In contrast to existing IF benchmarks, our MM-IFEval introduces significant improvements in diversity (32 constraint categories covering compositional and perceptual aspects), difficulty (averaging 5.1 constraints per question), and evaluation precision (using both judge models and rule-based verification)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 594, + 297, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 297, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 297, + 714 + ], + "type": "text", + "content": "Instruction Tuning Data for MLLMs. Recent advancements in multi-modal instruction tuning data aim to improve cross-modal alignment and increase the variety of tasks handled by MLLMs [4, 8, 20, 26, 45, 46, 51]. For example, some previous works [3, 4, 23] build synthetic instruction tuning data generated using GPT-4V [33], enabling open-source MLLMs to achieve performance comparable to proprietary models across multiple benchmarks. However, existing instruction tuning data are mainly designed for general knowledge or visual perception, and data for" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 435, + 555, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 435, + 555, + 472 + ], + "spans": [ + { + "bbox": [ + 313, + 435, + 555, + 472 + ], + "type": "text", + "content": "improving the IF abilities is scarce. The scarcity of training data for enhancing IF abilities motivated the development of our MM-IFEngine pipeline." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 484, + 405, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 484, + 405, + 498 + ], + "spans": [ + { + "bbox": [ + 313, + 484, + 405, + 498 + ], + "type": "text", + "content": "3. MM-IFEngine" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 506, + 556, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 506, + 556, + 662 + ], + "spans": [ + { + "bbox": [ + 313, + 506, + 556, + 662 + ], + "type": "text", + "content": "We employ the MM-IFEngine pipeline to generate image-instruction pairs, which are the foundation for creating instruction tuning data and our benchmark. As shown in Fig. 2 (a), the pipeline is composed of three main stages: (1) image filtering, where we systematically select a diverse set of images from multiple sources to ensure broad coverage of visual content; (2) task generation, in which we either synthesize novel tasks tailored to the selected images or refine existing instruction templates to better align with the image content; and (3) constraint integration, where high-quality, constraint-aware instructions are generated for images that initially lack associated annotated guidance, thereby enhancing the richness and precision of the dataset." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 671, + 395, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 671, + 395, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 671, + 395, + 685 + ], + "type": "text", + "content": "3.1. 
Image Filter" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "content": "Our image filtering strategy selects only high-quality images by removing those with low resolution or limited semantic" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "content": "richness. For unannotated pure image datasets (e.g., CC3M [37]), we prioritize natural scene images. Rich semantic content in these images enables the creation of more comprehensive and insightful QA pairs, which is crucial for designing diverse and complex instruction following tasks. We use the IC9600 and RAM metric proposed in the previous method [55] to select the images that have rich semantic content." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "type": "text", + "content": "Furthermore, we analyze existing annotated datasets, such as ALLaVA [3]. Our analysis reveals that some images suffer from low resolution, making them inadequate for the instruction-following task. 
Given our intention to design more intricate and varied instruction following tasks based on this data, we filter out data items containing low-quality images." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 259, + 156, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 259, + 156, + 270 + ], + "spans": [ + { + "bbox": [ + 55, + 259, + 156, + 270 + ], + "type": "text", + "content": "3.2. Task Generation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 276, + 295, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 276, + 295, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 276, + 295, + 407 + ], + "type": "text", + "content": "Image Source without Original QA Pairs. For image datasets lacking original annotated task instructions (e.g., CC3M [37]), we first design appropriate task instructions for the data items. We first develop a series of task instructions tailored to the data items. These instructions are crafted to elicit long-form responses that can be subsequently modified or refined using various constraints, for instance, Provide a detailed analysis of the image, including the setting, characters, and notable objects. The final task pool " + }, + { + "bbox": [ + 55, + 276, + 295, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_T" + }, + { + "bbox": [ + 55, + 276, + 295, + 407 + ], + "type": "text", + "content": " comprises a total of 16 distinct tasks, with further details available in Appendix A.1.2." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "spans": [ + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "text", + "content": "Given the task pool " + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_T" + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "text", + "content": ", we randomly select " + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "text", + "content": " tasks as examples of task types for each image " + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "text", + "content": ". We then prompt a powerful language model " + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "text", + "content": " (e.g., GPT-4o) to generate an appropriate task list " + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "inline_equation", + "content": "T_l" + }, + { + "bbox": [ + 55, + 408, + 296, + 468 + ], + "type": "text", + "content": " that aligns with the image content. 
The process is formulated as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 136, + 479, + 295, + 493 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 479, + 295, + 493 + ], + "spans": [ + { + "bbox": [ + 136, + 479, + 295, + 493 + ], + "type": "interline_equation", + "content": "\\left\\{T _ {l} ^ {*} \\right\\} = \\mathcal {M} \\left(I, T _ {e}\\right) \\tag {1}", + "image_path": "b3f4f0a77e2e8fb439f7a5a336c51d6a3b18c90d4e3d1baf3fdc99633a509a39.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "inline_equation", + "content": "T_{e} = \\{T_{1}, T_{2}, \\ldots, T_{k}\\}" + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "text", + "content": " and each " + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "inline_equation", + "content": "T_{i} \\in \\mathcal{P}_{T}" + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "text", + "content": ". 
The model " + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "text", + "content": " is tasked with either choosing relevant tasks from " + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "inline_equation", + "content": "T_{e}" + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "text", + "content": " or supplementing reasonable tasks to construct the appropriate task list " + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "inline_equation", + "content": "T_{l}^{*}" + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "text", + "content": ", ensuring that all tasks in " + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "inline_equation", + "content": "T_{l}^{*}" + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "text", + "content": " are in line with the image content. After generating the " + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "inline_equation", + "content": "T_{l}^{*}" + }, + { + "bbox": [ + 55, + 498, + 295, + 593 + ], + "type": "text", + "content": ", a sampling step is incorporated to guarantee task diversity. For each image, tasks are sampled. This sampling process is crucial as it enriches the variety of tasks associated with each image." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 594, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 296, + 713 + ], + "type": "text", + "content": "Image Source with QA Pairs. In the case of image datasets that have QA pairs (e.g., ALLaVA [3]), we adopt certain strategies for processing the original question annotations. We choose ALLaVA as the primary dataset for this type of image source due to its rich and diverse image content, which is accompanied by a variety of task types. 
First, we conduct an analysis of the original question annotations. We find that some of the questions are accompanied by some few-shot examples. Additionally, some questions in ALLaVA have options in their original annotations, which are not" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": "suitable for our instruction-following task. Since we need to incorporate certain constraints into the original instructions in the subsequent steps, we use regular expressions and length limits to filter the questions in ALLaVA. Specifically, we select those questions that do not have few-shot examples associated with them. Mathematically, if we let " + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": " be the set of all questions in ALLaVA, " + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "inline_equation", + "content": "Q_{fs}" + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": " be the subset of questions with few-shot examples, and " + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "inline_equation", + "content": "Q_{op}" + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": " be the subset of questions with options. 
We aim to find the subset " + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "inline_equation", + "content": "Q_{s}" + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": " of questions that satisfy the conditions:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 361, + 201, + 555, + 214 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 201, + 555, + 214 + ], + "spans": [ + { + "bbox": [ + 361, + 201, + 555, + 214 + ], + "type": "interline_equation", + "content": "Q _ {s} = \\left\\{q \\in Q | q \\notin Q _ {f s} \\wedge q \\notin Q _ {o p} \\right\\} \\tag {2}", + "image_path": "d574d081608edd8f054ab18944cd1e36be7d96c5ca67d720264cf861d25c4a53.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 222, + 555, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 222, + 555, + 270 + ], + "spans": [ + { + "bbox": [ + 313, + 222, + 555, + 270 + ], + "type": "text", + "content": "where the filtering based on the absence of few-shot examples and options is achieved using regular expressions and length limits. Then, we get the expected " + }, + { + "bbox": [ + 313, + 222, + 555, + 270 + ], + "type": "inline_equation", + "content": "T^{*}" + }, + { + "bbox": [ + 313, + 222, + 555, + 270 + ], + "type": "text", + "content": " in our filter " + }, + { + "bbox": [ + 313, + 222, + 555, + 270 + ], + "type": "inline_equation", + "content": "Q_{s}" + }, + { + "bbox": [ + 313, + 222, + 555, + 270 + ], + "type": "text", + "content": " set for the images." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 277, + 447, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 277, + 447, + 290 + ], + "spans": [ + { + "bbox": [ + 313, + 277, + 447, + 290 + ], + "type": "text", + "content": "3.3. 
Constraints Integration" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 295, + 555, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 295, + 555, + 437 + ], + "spans": [ + { + "bbox": [ + 313, + 295, + 555, + 437 + ], + "type": "text", + "content": "Constraints Pool " + }, + { + "bbox": [ + 313, + 295, + 555, + 437 + ], + "type": "inline_equation", + "content": "(\\mathcal{P}_C)" + }, + { + "bbox": [ + 313, + 295, + 555, + 437 + ], + "type": "text", + "content": " We use instruction to refer to the entire textual input, which in our paper can generally be viewed as a composition of a task instruction and multiple constraints instruction. Tasks and constraints are rich and diverse, with a certain complexity in our work. All the constraints in our work can be further classified into six major categories, each with its own unique characteristics and applications: Text Length Requirements, Mathematical Requirements, Language & Formatting Requirements, Rhetoric & Logic Requirements, Action Requirements, and Keyword Requirements. Please refer to the Appendix Fig. 5 for more details of all the constraints." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 439, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 439, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 439, + 556, + 715 + ], + "type": "text", + "content": "Given the constraints pool " + }, + { + "bbox": [ + 313, + 439, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_C" + }, + { + "bbox": [ + 313, + 439, + 556, + 715 + ], + "type": "text", + "content": " and task instructions, a straightforward approach for composing full instruction is to first set several constraints for each constraint type and then randomly select one constraint from some of the types to compose the constraint list, and finally concatenate the constraint list with the task instruction to form the full instruction. 
But this direct method has two problems: (1) The constraints are not diverse enough, which may not be able to fully evaluate the ability of the model. (2) The contradiction between the constraints and also between the constraints and the task instruction may exist. For the first problem, an LLM is employed to generate concrete content of constraint instruction for the specific constraint type in our method. In order to avoid the generated content being too divergent or hard to control its difficulty, we carefully design some cases or requirements of details that needed to be paid attention to when generating the content for each constraint type (Appendix A.1.1). For the second problem, we also use a powerful LLM to help keep the correlation of constraints with its instruction and filter out those that cause total contradiction. Finally, we prompt an LLM to check whether the constraints and the task instruction are compatible and filter out those failing to pass the check. Our method not only" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 295, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 295, + 95 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 295, + 95 + ], + "type": "text", + "content": "ensures the compatibility of constraints and instructions but also enriches the diversity of constraints." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 97, + 296, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 97, + 296, + 335 + ], + "spans": [ + { + "bbox": [ + 56, + 97, + 296, + 335 + ], + "type": "text", + "content": "In our actual practice process, we find that although we prompt the LLM to select appropriate constraints that should be compatible with the task instruction and other constraints, the generated constraints still have some contradiction with the task instruction, especially on those existing datasets with various kinds of annotations. The reason is that these datasets are designed for overall question-answering tasks, and the question(or named task instruction) tends to be contradictory with the constraints, which are mostly compatible with those tasks of creating or answering in non-short form. So, we decouple the selection and generation steps for this type of data source. Specifically, we first select the constraints from the constraints pool " + }, + { + "bbox": [ + 56, + 97, + 296, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_C" + }, + { + "bbox": [ + 56, + 97, + 296, + 335 + ], + "type": "text", + "content": " and then provide the selected mostly compatible constraints to the LLM to select secondly and generate final constraints. But for image datasets without original QA pairs, in other words, for which we generate task instructions for them using " + }, + { + "bbox": [ + 56, + 97, + 296, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_T" + }, + { + "bbox": [ + 56, + 97, + 296, + 335 + ], + "type": "text", + "content": ", we directly sample k constraint types for the LLM to generate concrete content because they are mostly compatible with the pre-designed task instruction. 
The uniform process is formulated as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 102, + 344, + 295, + 359 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 344, + 295, + 359 + ], + "spans": [ + { + "bbox": [ + 102, + 344, + 295, + 359 + ], + "type": "interline_equation", + "content": "C _ {l} ^ {*} = \\mathcal {L} \\left(C _ {s}, T ^ {*}\\right), C _ {f} ^ {*} = \\mathcal {V} \\left(C _ {l} ^ {*}, T ^ {*}\\right) \\tag {3}", + "image_path": "40a6ec4b503c24eee160a53d54fb61c79b65802f8cbc3c328e2ec808cb3733e6.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "spans": [ + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^*" + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "content": " is the task applicable to the image. The model " + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "content": " is tasked with both choosing appropriate constraint types from " + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "inline_equation", + "content": "C_s" + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "content": " again and generating concrete constraints for some of them, whose output is a list of concrete constraint descriptions. 
To ensure that the generated constraints remain compatible with the given task instruction " + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "inline_equation", + "content": "T^*" + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "content": ", we employ a final validation step using another LLM process, denoted as " + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "content": ". This validation function checks whether each constraint in " + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "inline_equation", + "content": "C_l^*" + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "content": " aligns with " + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "inline_equation", + "content": "T^*" + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "content": " and filters out those that contradict or do not fit the task instruction. The resulting set of fully verified and compatible constraints is represented as " + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "inline_equation", + "content": "C_f^*" + }, + { + "bbox": [ + 55, + 366, + 295, + 498 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "content": "MM-IFInstruct-23k Construction. By applying the MM-IFEngine pipeline, we construct the MM-IFInstruct-23k dataset, which contains 23k high-quality multi-modal instruction-following training data. 
We first take an analysis of the performance of the current open-source MLLMs and proprietary MLLMs on several benchmarks [25, 34], and find that for instruction-following capability, the most powerful open-source MLLM like InternVL2.5-78B-MPO [42] is nearly equivalent to GPT-4o, and the performance on general VQA benchmarks are even higher than GPT-4o. Thus, we use InternVL2.5-78B-MPO to generate responses for our MM-IFInstruct-23k dataset. Despite its capabilities, the InternVL2.5-78B-MPO model encounters difficulties in ensuring " + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "content": " compliance with our constraints, a challenge attributed to the complexity, number, and comprehensiveness. Consequently, we implement a post-processing stage to filter out responses that do not meet the specified criteria. Acknowledging that achieving perfect constraint adherence" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 555, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 156 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 156 + ], + "type": "text", + "content": "might be challenging even for human annotators on this task, we set a practical accuracy threshold of " + }, + { + "bbox": [ + 313, + 72, + 555, + 156 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 313, + 72, + 555, + 156 + ], + "type": "text", + "content": ". Finally, our MM-IFInstruct-23k comprises 23k data items, with 16k constructed from the training set of CC3M, 6k from ALLaVA, and 4k from the training set of MultiUI, Geo170k[12] and ChartQA[31]. We show the distribution of constraints number of MM-IFInstruct-23k in Fig. 3." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 157, + 556, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 157, + 556, + 383 + ], + "spans": [ + { + "bbox": [ + 313, + 157, + 556, + 383 + ], + "type": "text", + "content": "MM-IFDPO-23k Construction. To comprehensively explore and make full use of our high-quality data, we also utilize MM-IFEngine to construct MM-IFDPO-23k, a preference dataset comprising chosen and rejected samples suitable for Direct Preference Optimization (DPO) [36]. Our high-quality data can be directly employed as the chosen samples. Regarding rejected samples, we opt to utilize Qwen2-VL-7B-Instruct to answer the variant of the question for generating rejected pairs. Specifically, we have four distinct settings for generating negative pairs, which mainly differ in the input to Qwen2-VL-7B-Instruct. These settings include (1) With image, but randomly remove one-third of the number of constraints in the prompt; (2) With image, but randomly remove two-thirds of the number of constraints in the prompt; (3) With image, but randomly remove all the constraints in the prompt; and (4) Full prompt, but without the image; We use these four types of input to feed into Qwen2-VL-7B-Instruct model, and collect the rejected responses to construct the MM-IFDPO-23k." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 393, + 391, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 393, + 391, + 406 + ], + "spans": [ + { + "bbox": [ + 314, + 393, + 391, + 406 + ], + "type": "text", + "content": "4. MM-IFEval" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 414, + 555, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 414, + 555, + 486 + ], + "spans": [ + { + "bbox": [ + 313, + 414, + 555, + 486 + ], + "type": "text", + "content": "Existing benchmarks for multi-modal instruction following are scarce. 
The majority focus on simple and atomic instructions, resulting in performance saturation across models. To address this limitation, we introduce MM-IFEval, a human-annotated, comprehensive, and challenging benchmark designed for evaluating multi-modal IF." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 493, + 457, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 493, + 457, + 504 + ], + "spans": [ + { + "bbox": [ + 313, + 493, + 457, + 504 + ], + "type": "text", + "content": "4.1. MM-IFEval Construction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 510, + 556, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 556, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 556, + 665 + ], + "type": "text", + "content": "To construct the MM-IFEval, we first use our MM-IFEngine to generate the question-answer (QA) pairs for images. The generated instructions may inherently contain potential conflicts. Consequently, human annotation remains critical for constructing this benchmark, as human annotators possess the cognitive capacity for comprehensive assessment of these complex situations. After the human annotation, we further use an extra post-processing step that prompts the LLMs to double-check and mitigate the occurrence of constraint conflicts as much as possible. Finally, we construct the MM-IFEval bench of 400 questions, 300 of which are compose-level open-ended questions and 100 perception-level questions with ground truth." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "type": "text", + "content": "Diverse Constraints. 
With 32 distinct constraint categories and an average of 5.1 constraints per question, MM-IFEval presents a more challenging evaluation task compared to earlier benchmarks (e.g., [34], which has 8 categories and 2.6" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 76, + 289, + 229 + ], + "blocks": [ + { + "bbox": [ + 63, + 76, + 289, + 229 + ], + "lines": [ + { + "bbox": [ + 63, + 76, + 289, + 229 + ], + "spans": [ + { + "bbox": [ + 63, + 76, + 289, + 229 + ], + "type": "image", + "image_path": "e90dbd5a52e6d099a2eb2d93609c0aa8ceee4aed0b36fcf9264cf178892f3f49.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 246, + 303, + 290 + ], + "lines": [ + { + "bbox": [ + 59, + 246, + 303, + 290 + ], + "spans": [ + { + "bbox": [ + 59, + 246, + 303, + 290 + ], + "type": "text", + "content": "Figure 3. Constraint Quantity Distribution in MM-IFInstruct-23k. Our MM-IFInstruct-23k exhibits systematic variation in constraint complexity, with each sample containing 3-12 constraints per instruction." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 312, + 296, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 312, + 296, + 372 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 296, + 372 + ], + "type": "text", + "content": "average constraints per question). 
Furthermore, our benchmark incorporates essential constraints such as \"Output in JSON format\", which is prevalent and practical in real-world scenarios, a feature not found in previous multi-modal instruction following benchmarks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 373, + 296, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 373, + 296, + 481 + ], + "spans": [ + { + "bbox": [ + 55, + 373, + 296, + 481 + ], + "type": "text", + "content": "Compose-level and Perception-level Questions. compose-level questions involve textual constraints, while perception-level questions require greater visual perception ability to solve. The perception-level questions incorporate a variety of image sources, such as natural scenes, user interfaces, diagrams, table charts, and mathematical expressions, which we believe are representative of real-world applications. Please refer to the Appendix for examples of compose-level and perception-level questions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 491, + 167, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 491, + 167, + 504 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 167, + 504 + ], + "type": "text", + "content": "4.2. Hybrid Evaluation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 509, + 297, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 509, + 297, + 652 + ], + "spans": [ + { + "bbox": [ + 55, + 509, + 297, + 652 + ], + "type": "text", + "content": "Current multi-modal instruction following benchmarks often rely solely on GPT-4o for evaluation. However, accurately assessing certain constraints, such as numerical conditions (e.g., 'output in 200 words', 'Answer in 5 paragraphs', 'Use the word 'cat' in the answer twice'), remains challenging even for GPT-4o. In contrast, verifiable functions like string matching offer greater precision than judge models for such constraints. 
To address this, we propose a hybrid evaluation strategy (see Fig. 2(c)) that employs three methods, including both rule-based Verification and judge models for more robust and precise evaluation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 654, + 297, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 297, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 297, + 714 + ], + "type": "text", + "content": "(1) Rule-based Verification. For constraints that adhere to a fixed format and involve specific content that can be objectively verified—yet remain challenging for an LLM to assess accurately—we employ a rule-based approach. Specifically, we design a set of predefined functions for different con" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 347, + 72, + 512, + 236 + ], + "blocks": [ + { + "bbox": [ + 347, + 72, + 512, + 236 + ], + "lines": [ + { + "bbox": [ + 347, + 72, + 512, + 236 + ], + "spans": [ + { + "bbox": [ + 347, + 72, + 512, + 236 + ], + "type": "image", + "image_path": "514e423050fb0272612f0f222c607457aa6658a3c1d4f4cee5e2a32743f32099.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 247, + 550, + 291 + ], + "lines": [ + { + "bbox": [ + 307, + 247, + 550, + 291 + ], + "spans": [ + { + "bbox": [ + 307, + 247, + 550, + 291 + ], + "type": "text", + "content": "Figure 4. Constraint Category Distribution inCompose-Level Problems of MM-IFEval. This part comprises six primary constraint categories with 32 subcategories, forming a multi-level taxonomy for instruction-following evaluation." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 312, + 555, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 312, + 555, + 384 + ], + "spans": [ + { + "bbox": [ + 313, + 312, + 555, + 384 + ], + "type": "text", + "content": "strand types. 
The LLM is first prompted to extract the relevant parameters, denoted as Params, from the constraint description. When evaluating a constraint that falls within the scope of our rule-based framework, we use Params and the model's output as inputs to the predefined function to determine compliance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 384, + 556, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 384, + 556, + 514 + ], + "spans": [ + { + "bbox": [ + 313, + 384, + 556, + 514 + ], + "type": "text", + "content": "(2) LLM-based Direct Judgment. This method is primarily used for evaluating constraints that can be easily and unambiguously verified based on the model's output. It is applicable to constraints where correctness is straightforward to determine, such as those requiring the inclusion of specific words or phrases. For instance, a constraint like \"Use the word 'inspiration' or its synonyms at least twice in the response\" does not follow a strict format and cannot be assessed using a rule-based approach. Instead, we directly leverage an LLM to determine whether the constraint is satisfied." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 516, + 556, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 516, + 556, + 635 + ], + "spans": [ + { + "bbox": [ + 313, + 516, + 556, + 635 + ], + "type": "text", + "content": "(3) LLM-based Comparative Judgment. Some constraints, particularly those related to tone, style, or role-playing, are difficult to evaluate directly. To improve judgment accuracy, we adopt a comparative approach. Specifically, we generate a second model output using a nearly identical prompt but without the constraint under evaluation. The LLM-based evaluator is then provided with both outputs and asked to compare them, determining whether the model's response with the constraint in the prompt adheres more closely to the expected requirement." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 646, + 395, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 646, + 395, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 646, + 395, + 658 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 665, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 555, + 714 + ], + "type": "text", + "content": "Benchmarks. We select the following benchmarks to demonstrate that models fine-tuned on MM-IFInstruct-23k and MM-IFDPO-23k enhance instruction following without compromising performance on other VQA tasks: (1)" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 57, + 118, + 569, + 253 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 555, + 115 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 555, + 115 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 555, + 115 + ], + "type": "text", + "content": "Table 1. Main results on Instruction Following benchmarks, including our proposed MM-IFEval, MIA-Bench [34], and IFEval [57]. 
The symbol " + }, + { + "bbox": [ + 55, + 70, + 555, + 115 + ], + "type": "inline_equation", + "content": "{}^{\\mathrm{M}}" + }, + { + "bbox": [ + 55, + 70, + 555, + 115 + ], + "type": "text", + "content": " refers to multimodal benchmarks,and " + }, + { + "bbox": [ + 55, + 70, + 555, + 115 + ], + "type": "inline_equation", + "content": "{}^{\\mathrm{T}}" + }, + { + "bbox": [ + 55, + 70, + 555, + 115 + ], + "type": "text", + "content": " denotes text-only benchmarks. We report both compose-level (\"C\") and perception-level (\"P\") for MM-IFEval,prompt-level accuracy (\"Prompt.\")andInst-level accuracy (\"Inst.\")for IFEval,and the averaged results across all three benchmarks in the rightmost column." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 118, + 569, + 253 + ], + "lines": [ + { + "bbox": [ + 57, + 118, + 569, + 253 + ], + "spans": [ + { + "bbox": [ + 57, + 118, + 569, + 253 + ], + "type": "table", + "html": "
ModelParameterMM-IFEvalM(ours)MIA MIFTAvg.
CPAvg.Prompt.Inst.Avg.
LLaVA-NeXT-7B [21]7B36.816.031.673.232.043.337.747.5
LLaVA-OneVision-Qwen2-7B-OV [16]8B37.424.034.084.543.354.849.055.8
InternVL2-8B [7]8B45.232.041.986.244.657.050.859.6
InternVL2.5-8B [6]8B49.636.046.288.552.262.457.364.0
LLaVA-NeXT-Llama3-8B [21]8B45.921.039.783.345.056.450.757.9
w. MM-IFInstruct-23k-59.319.049.2 +9.586.5 +3.250.861.856.3 +5.664.0 +6.1
w. MM-IFDPO-23k-58.721.049.3 +9.690.0 +6.764.573.769.1 +18.469.5 +11.6
Qwen2-VL-7B-Instruct [41]8B42.740.042.080.542.452.547.456.6
w. MM-IFInstruct-23k-57.038.052.3 +10.387.7 +7.246.858.452.6 +5.264.2 +7.6
w. MM-IFDPO-23k-55.243.052.2 +10.288.1 +7.655.264.359.7 +12.366.7 +10.1
", + "image_path": "7d4bda3009acb1f7beb7ea117256abba0234caba3ad6dd0ed42754e5c74ba40c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 57, + 300, + 563, + 392 + ], + "blocks": [ + { + "bbox": [ + 55, + 263, + 555, + 297 + ], + "lines": [ + { + "bbox": [ + 55, + 263, + 555, + 297 + ], + "spans": [ + { + "bbox": [ + 55, + 263, + 555, + 297 + ], + "type": "text", + "content": "Table 2. Main results on VQA benchmarks, including general knowledge (MMMU [50], MMBench [24], MMStar [5], MMT-Bench [48]), document understanding (AI2D [15], OCRBench [25]), Chat (MMVet [49]) and Hallusion (POPE [19]). Fine-tuning models on MM-IFDPO-23k achieve comparable performance across these benchmarks." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 300, + 563, + 392 + ], + "lines": [ + { + "bbox": [ + 57, + 300, + 563, + 392 + ], + "spans": [ + { + "bbox": [ + 57, + 300, + 563, + 392 + ], + "type": "table", + "html": "
ModelGeneralDocumentChatHallusion
MMMUvalMMBenchdevMMStarMMT-BenchvalAI2DOCRBenchMM VetPOPEAvg.
LLaVA-NeXT-Llama3-8B [21]43.772.543.653.173.155.043.387.258.9
w. MM-IFInstruct-23k45.869.344.253.371.255.346.388.859.3
w. MM-IFDPO-23k44.172.143.753.172.356.743.986.859.1
Qwen2-VL-7B-Instruct [41]53.981.060.863.282.986.763.386.372.3
w. MM-IFInstruct-23k54.079.357.161.081.681.861.689.270.7
w. MM-IFDPO-23k54.081.358.563.783.386.866.185.772.4
", + "image_path": "6b5b9b93d1fcd79a581602fab86639d6577f9b424223acb45e02e3004e7e0e55.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 412, + 294, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 412, + 294, + 495 + ], + "spans": [ + { + "bbox": [ + 54, + 412, + 294, + 495 + ], + "type": "text", + "content": "Instruction Following benchmarks, including MIA-Bench [34], IFEval [57], and our proposed MM-IFEval. To be noted, IFEval is a language-only benchmark while others are both multi-modal benchmarks. (2) VQA Benchmarks, including MMMU [50], MMBench [24], MMStar [5], AI2D [15], OCRBench [25], MMVet [49], POPE [19] and MMT-Bench [48]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 496, + 295, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 496, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 54, + 496, + 295, + 616 + ], + "type": "text", + "content": "Implementation Details. We conducted SFT and DPO fine-tuning experiments on two representative MLLMs: Qwen2-VL-7B-Instruct [41] and LLaVA-Next-Llama3-8B [21], using our custom datasets MM-IFInstruct-23k for supervised fine-tuning (SFT) and MM-IFDPO-23k for direct preference optimization (DPO). For the SFT phase, we used a batch size of 128 and a learning rate of 1e-5. For the DPO phase, we used a learning rate of 5e-7 with the batch size of 16. We implemented our training pipeline with the help of LLaMAFactory and evaluation pipeline under VLMEvalkit [10]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 624, + 295, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 624, + 295, + 647 + ], + "spans": [ + { + "bbox": [ + 55, + 624, + 295, + 647 + ], + "type": "text", + "content": "5.1. 
Results about MM-IFInstruct-23k and MM-IFDPO-23k" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 654, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 296, + 713 + ], + "type": "text", + "content": "Consistently Improvements on Instruction Following Benchmarks. As shown in Tab. 1, both MM-IFInstruct-23k and MM-IFDPO-23k significantly enhance the model's performance in instruction following benchmarks. Finetuning LLaVA-Next and Qwen2-VL on MM-IFInstruct-23k" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "spans": [ + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "text", + "content": "yielded significant averaging performance gains of " + }, + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "inline_equation", + "content": "6.1\\%" + }, + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "inline_equation", + "content": "7.6\\%" + }, + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "text", + "content": " points, respectively. Furthermore, applying DPO with MM-IFDPO-23k also led to notable improvements for LLaVA-Next and Qwen2-VL, with average gains of " + }, + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "inline_equation", + "content": "11.6\\%" + }, + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "inline_equation", + "content": "10.1\\%" + }, + { + "bbox": [ + 313, + 412, + 555, + 496 + ], + "type": "text", + "content": " points. Such improvements demonstrate the effectiveness of MM-IFEngine in constructing high-quality training data." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 509, + 556, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 509, + 556, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 509, + 556, + 604 + ], + "type": "text", + "content": "Comparable Results on VQA Benchmarks. To show that fine-tuning on MM-IFInstruct-23k and MM-IFDPO-23k improves instruction following without degrading performance on other VQA tasks, we analyzed model performance on other widely used benchmarks, as detailed in Tab. 2. Results indicate that models fine-tuning with MM-IFInstruct-23k and MM-IFDPO-23k demonstrate comparable performance across these benchmarks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "text", + "content": "SFT vs DPO. As evidenced by Tab. 1 and Tab. 2, DPO using MM-IFDPO-23k significantly surpasses SFT on MM-IFInstruct-23k. This is likely due to negative samples of DPO, which are essential for training models to respect constraints, particularly in our data with multiple and diverse constraints. Additionally, the Kullback-Leibler (KL) divergence in DPO preserves the model's generalization, as demonstrated in Tab. 2." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 129, + 296, + 316 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 297, + 126 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 297, + 126 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 297, + 126 + ], + "type": "text", + "content": "Table 3. Evaluation of various MLLMs on MM-IFEval. We report the accuracy of easy and difficult problems and the average accuracy across all problems. The C-Level and P-Level refer to the compose-level and perception-level problems, respectively. The best performance in each section is highlighted in bold." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 129, + 296, + 316 + ], + "lines": [ + { + "bbox": [ + 56, + 129, + 296, + 316 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 296, + 316 + ], + "type": "table", + "html": "
ModelParamC-LevelP-LevelAvg.
Proprietary MLLMs
Claude-3.5V-Sonnet [1]-67.544.061.7
GPT-4o-mini [13]-70.440.062.8
GPT-4o (20240806) [13]-71.544.064.6
Open-Source MLLMs
LLaVA-NeXT-7B [21]7B36.816.031.6
LLaVA-OneVision-Qwen2-7b-OV [16]8B37.424.034.0
MiniCPM-V-2.6 [47]8B39.232.037.4
InternVL2-8B [7]8B45.232.041.9
InternVL2-40B [7]40B48.036.045.0
InternVL2.5-8B [6]8B49.636.046.2
InternVL2.5-26B [6]8B53.532.048.1
Qwen2-VL-72B-Instruct [41]72B53.443.050.8
LLaVA-NeXT-Llama3-8B [21]8B45.921.039.7
+ MM-IFDPO-23k-58.721.049.3
Qwen2-VL-7B-Instruct [41]8B42.740.042.0
+ MM-IFDPO-23k-55.243.052.2
", + "image_path": "7d566bf817e91560624919137435e2e54084d3f343d7e69395cddc8f69c2ba35.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 336, + 211, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 336, + 211, + 348 + ], + "spans": [ + { + "bbox": [ + 55, + 336, + 211, + 348 + ], + "type": "text", + "content": "5.2. Leaderboard of MM-IFEval" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 354, + 295, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 354, + 295, + 413 + ], + "spans": [ + { + "bbox": [ + 55, + 354, + 295, + 413 + ], + "type": "text", + "content": "We present the performance comparison results of various MLLMs on our MM-IFEval in Tab. 3, including both proprietary MLLMs such as GPT-4o [13] and Claude-3.5 [1] and open-source MLLMs such as LLaVA-Next [21], LLaVA-OneVision [16], InternVL [6, 7], and Qwen2-VL [41]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 413, + 295, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 413, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 55, + 413, + 295, + 616 + ], + "type": "text", + "content": "MM-IFEval is Challenging. Results on Tab. 3 demonstrate that multimodal instruction following is still a challenging and unsolved task for current MLLMs, specifically for the perception-level problems. The propriety models GPT-4o and Claude-3.5V-Sonnet establish top-tier average performance with scores of 64.6 and 61.7, respectively. The leading open-source MLLM, Qwen2-VL-72B merely achieves an overall accuracy of 50.8. We attribute the performance gap between proprietary and open-source models to the scarcity of high-quality open-source training data for instruction following. 
As a result of our MM-IFDPO-23k, Qwen2-VL-7B fine-tuned via our optimized DPO approach achieves a score of 52.2, demonstrating a " + }, + { + "bbox": [ + 55, + 413, + 295, + 616 + ], + "type": "inline_equation", + "content": "24.3\\%" + }, + { + "bbox": [ + 55, + 413, + 295, + 616 + ], + "type": "text", + "content": " relative improvement over its baseline (42.0), and even surpasses the larger Qwen2VL-72B model. We hope our MM-IFEval benchmark motivates further exploration into improving MLLM instruction-following." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 616, + 296, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 616, + 296, + 665 + ], + "spans": [ + { + "bbox": [ + 55, + 616, + 296, + 665 + ], + "type": "text", + "content": "Benchmark Examples. Please refer to the Appendix for visual examples of MM-IFEval, including images and instructions with constraints for both compose-level and perception-level problems." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 672, + 156, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 672, + 156, + 683 + ], + "spans": [ + { + "bbox": [ + 55, + 672, + 156, + 683 + ], + "type": "text", + "content": "5.3. Ablation Studies" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "content": "Ablation Studies on Different DPO Settings. In Tab. 4, we present an ablation study on various strategies for con" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 317, + 129, + 553, + 255 + ], + "blocks": [ + { + "bbox": [ + 313, + 70, + 555, + 125 + ], + "lines": [ + { + "bbox": [ + 313, + 70, + 555, + 125 + ], + "spans": [ + { + "bbox": [ + 313, + 70, + 555, + 125 + ], + "type": "text", + "content": "Table 4. 
Ablation studies across different DPO settings, including randomly deleting constraints (second row to fourth row) or prompting MLLMs without images (bottom row) to generate negative responses. Avg. refers to the average score of three IF benchmarks." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 129, + 553, + 255 + ], + "lines": [ + { + "bbox": [ + 317, + 129, + 553, + 255 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 553, + 255 + ], + "type": "table", + "html": "
ModelMM-IFEvalMIAIFEvalAvg.
Qwen2-VL-7B-Instruct42.080.547.456.6
+ DPO (-33% cons)51.588.257.965.8
+ DPO (-66% cons)51.288.058.465.9
+ DPO (-100% cons)52.288.159.766.7
+ DPO (w/o img)48.486.954.763.4
LLaVA-NeXT-Llama3-8B39.783.350.757.9
+ DPO (-33% cons)50.487.264.367.3
+ DPO (-66% cons)48.786.869.768.4
+ DPO (-100% cons)49.390.069.169.5
+ DPO (w/o img)44.785.964.865.2
", + "image_path": "3d7a238954389d9ee0e5757dcc7bb6a09455c5244976e249843db6c5a9f8ab02.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 271, + 555, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 271, + 555, + 344 + ], + "spans": [ + { + "bbox": [ + 313, + 271, + 555, + 344 + ], + "type": "text", + "content": "structuring pairwise preference data for Direct Preference Optimization (DPO). These strategies primarily include: (1) generating rejected responses by randomly removing constraints from the instruction (second to fourth rows), and (2) prompting MLLMs without providing image inputs to generate rejected responses (bottom row)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 344, + 556, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 344, + 556, + 511 + ], + "spans": [ + { + "bbox": [ + 313, + 344, + 556, + 511 + ], + "type": "text", + "content": "We conduct experiments on both the Qwen2-VL-7B-Instruct and LLaVA-NeXT-Llama3-8B models. As shown in Tab. 4, all DPO variants exhibit strong robustness, consistently outperforming the baseline. Among the four evaluated strategies, removing " + }, + { + "bbox": [ + 313, + 344, + 556, + 511 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 313, + 344, + 556, + 511 + ], + "type": "text", + "content": " of the constraints to generate rejected responses achieves the best performance, whereas omitting image inputs yields the weakest performance. 
Furthermore, we observe a consistent trend: as the proportion of removed constraints increases from " + }, + { + "bbox": [ + 313, + 344, + 556, + 511 + ], + "type": "inline_equation", + "content": "33\\%" + }, + { + "bbox": [ + 313, + 344, + 556, + 511 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 313, + 344, + 556, + 511 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 313, + 344, + 556, + 511 + ], + "type": "text", + "content": ", the performance of the resulting DPO models improves accordingly. This suggests that removing more constraints amplifies the semantic gap between preferred and rejected responses, thereby enhancing the effectiveness of contrastive learning during DPO training." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 512, + 555, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 512, + 555, + 548 + ], + "spans": [ + { + "bbox": [ + 313, + 512, + 555, + 548 + ], + "type": "text", + "content": "Based on these findings, we adopt the " + }, + { + "bbox": [ + 313, + 512, + 555, + 548 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 313, + 512, + 555, + 548 + ], + "type": "text", + "content": " -constraint removal strategy as the default approach for constructing the DPO data in MM-IFDPO-23k." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 561, + 388, + 573 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 561, + 388, + 573 + ], + "spans": [ + { + "bbox": [ + 313, + 561, + 388, + 573 + ], + "type": "text", + "content": "6. 
Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 582, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 556, + 715 + ], + "type": "text", + "content": "This paper contributes to the field of multimodal instruction-following by exploring pipelines for training data collection and proposing a challenging benchmark. We present MM-IFEngine, a pipeline designed to generate image-instruction pairs, subsequently used to construct MM-IFInstruct-23k for SFT and MM-IFDPO-23k for DPO. We also analyze the limitations of existing multimodal instruction following benchmarks and propose MM-IFEval, a benchmark featuring diverse instruction types and a hybrid evaluation strategy that combines rule-based methods with an LLM-based judge. We hope this work inspires further research into improving the" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 297, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 297, + 109 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 297, + 109 + ], + "type": "text", + "content": "instruction-following ability of Multimodal Large Language Models, a critical step towards realizing their potential in diverse and impactful applications." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 120, + 115, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 120, + 115, + 133 + ], + "spans": [ + { + "bbox": [ + 56, + 120, + 115, + 133 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 140, + 296, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 140, + 218, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 140, + 218, + 151 + ], + "spans": [ + { + "bbox": [ + 61, + 140, + 218, + 151 + ], + "type": "text", + "content": "[1] Anthropic. Claude 3.5 sonnet. 2024. 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 152, + 296, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 152, + 296, + 207 + ], + "spans": [ + { + "bbox": [ + 61, + 152, + 296, + 207 + ], + "type": "text", + "content": "[2] Yonatan Bitton, Hritik Bansal, Jack Hessel, Rulin Shao, Wanrong Zhu, Anas Awadalla, Josh Gardner, Rohan Taori, and Ludwig Schmidt. VisIT-Bench: A benchmark for vision-language instruction following inspired by real-world use. In NeurIPS, Datasets and Benchmarks, 2023. 1, 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 209, + 296, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 209, + 296, + 262 + ], + "spans": [ + { + "bbox": [ + 62, + 209, + 296, + 262 + ], + "type": "text", + "content": "[3] Guiming Hardy Chen, Shunian Chen, Ruifei Zhang, Junying Chen, Xiangbo Wu, Zhiyi Zhang, Zhihong Chen, Jianquan Li, Xiang Wan, and Benyou Wang. Allava: Harnessing gpt4v-synthesized data for lite vision-language models. arXiv preprint arXiv:2402.11684, 2024. 
3, 4, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 264, + 296, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 264, + 296, + 308 + ], + "spans": [ + { + "bbox": [ + 62, + 264, + 296, + 308 + ], + "type": "text", + "content": "[4] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 310, + 296, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 310, + 296, + 354 + ], + "spans": [ + { + "bbox": [ + 62, + 310, + 296, + 354 + ], + "type": "text", + "content": "[5] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? In NeurIPS, 2024. 3, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 356, + 296, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 356, + 296, + 420 + ], + "spans": [ + { + "bbox": [ + 62, + 356, + 296, + 420 + ], + "type": "text", + "content": "[6] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. 7, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 422, + 296, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 422, + 296, + 477 + ], + "spans": [ + { + "bbox": [ + 62, + 422, + 296, + 477 + ], + "type": "text", + "content": "[7] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? 
closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 7, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 479, + 296, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 479, + 296, + 522 + ], + "spans": [ + { + "bbox": [ + 62, + 479, + 296, + 522 + ], + "type": "text", + "content": "[8] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. Instructlip: Towards general-purpose vision-language models with instruction tuning, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 524, + 296, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 524, + 296, + 590 + ], + "spans": [ + { + "bbox": [ + 62, + 524, + 296, + 590 + ], + "type": "text", + "content": "[9] Biplab Deka, Zifeng Huang, Chad Franzen, Joshua Hibschman, Daniel Afergan, Yang Li, Jeffrey Nichols, and Ranjitha Kumar. Rico: A mobile app dataset for building data-driven design applications. In Proceedings of the 30th annual ACM symposium on user interface software and technology, pages 845-854, 2017. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 592, + 296, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 592, + 296, + 656 + ], + "spans": [ + { + "bbox": [ + 57, + 592, + 296, + 656 + ], + "type": "text", + "content": "[10] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmealkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM international conference on multimedia, pages 11198-11201, 2024. 
7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 658, + 296, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 658, + 296, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 658, + 296, + 712 + ], + "type": "text", + "content": "[11] Xinyu Fang, Zhijian Chen, Kai Lan, Shengyuan Ding, Yingji Liang, Xiangyu Zhao, Farong Wen, Zicheng Zhang, Guofeng Zhang, Haodong Duan, et al. Creation-mmbench: Assessing context-aware creative intelligence in mllm. arXiv preprint arXiv:2503.14478, 2025. 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 714 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 316, + 73, + 555, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 555, + 127 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 555, + 127 + ], + "type": "text", + "content": "[12] Jiahui Gao, Renjie Pi, Jipeng Zhang, Jiacheng Ye, Wanjun Zhong, Yufei Wang, Lanqing Hong, Jianhua Han, Hang Xu, Zhenguo Li, et al. G-llava: Solving geometric problem with multi-modal large language model. arXiv preprint arXiv:2312.11370, 2023.5.2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 129, + 555, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 129, + 555, + 172 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 555, + 172 + ], + "type": "text", + "content": "[13] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. GPT-4o system card. arXiv preprint arXiv:2410.21276, 2024. 
8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 173, + 555, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 173, + 555, + 226 + ], + "spans": [ + { + "bbox": [ + 317, + 173, + 555, + 226 + ], + "type": "text", + "content": "[14] Yuxin Jiang, Yufei Wang, Xingshan Zeng, Wanjun Zhong, Liangyou Li, Fei Mi, Lifeng Shang, Xin Jiang, Qun Liu, and Wei Wang. Followbench: A multi-level fine-grained constraints following benchmark for large language models. In ACL, 2024. 1, 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 228, + 554, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 228, + 554, + 259 + ], + "spans": [ + { + "bbox": [ + 316, + 228, + 554, + 259 + ], + "type": "text", + "content": "[15] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In ECCV, 2016. 3, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 261, + 555, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 261, + 555, + 304 + ], + "spans": [ + { + "bbox": [ + 316, + 261, + 555, + 304 + ], + "type": "text", + "content": "[16] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Peiyuan Zhang, Yanwei Li, Ziwei Liu, et al. LLaVA-OneVision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 7, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 305, + 554, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 305, + 554, + 349 + ], + "spans": [ + { + "bbox": [ + 316, + 305, + 554, + 349 + ], + "type": "text", + "content": "[17] Huayang Li, Siheng Li, Deng Cai, Longyue Wang, Lemao Liu, Taro Watanabe, Yujiu Yang, and Shuming Shi. TextBind: Multi-turn interleaved multimodal instruction-following in the wild. In ACL Findings, 2024. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 350, + 555, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 350, + 555, + 392 + ], + "spans": [ + { + "bbox": [ + 316, + 350, + 555, + 392 + ], + "type": "text", + "content": "[18] Jian Li, Weiheng Lu, Hao Fei, Meng Luo, Ming Dai, Min Xia, Yizhang Jin, Zhenye Gan, Ding Qi, Chaoyou Fu, et al. A survey on benchmarks of multimodal large language models. arXiv preprint arXiv:2408.08632, 2024. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 394, + 554, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 394, + 554, + 426 + ], + "spans": [ + { + "bbox": [ + 316, + 394, + 554, + 426 + ], + "type": "text", + "content": "[19] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models, 2023. 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 427, + 555, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 427, + 555, + 458 + ], + "spans": [ + { + "bbox": [ + 316, + 427, + 555, + 458 + ], + "type": "text", + "content": "[20] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv preprint arXiv:2304.08485, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 460, + 555, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 460, + 555, + 492 + ], + "spans": [ + { + "bbox": [ + 316, + 460, + 555, + 492 + ], + "type": "text", + "content": "[21] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. 
Llava-last: Improved reasoning,OCR,and world knowledge,2024.7,8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 494, + 555, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 494, + 555, + 536 + ], + "spans": [ + { + "bbox": [ + 316, + 494, + 555, + 536 + ], + "type": "text", + "content": "[22] Junpeng Liu, Tianyue Ou, Yifan Song, Yuxiao Qu, Wai Lam, Chenyan Xiong, Wenhu Chen, Graham Neubig, and Xiang Yue. Harnessing webpage uis for text-rich visual understanding, 2024. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 537, + 555, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 537, + 555, + 592 + ], + "spans": [ + { + "bbox": [ + 317, + 537, + 555, + 592 + ], + "type": "text", + "content": "[23] Yangzhou Liu, Yue Cao, Zhangwei Gao, Weiyun Wang, Zhe Chen, Wenhai Wang, Hao Tian, Lewei Lu, Xizhou Zhu, Tong Lu, Yu Qiao, and Jifeng Dai. Mminstruct: a high-quality multi-modal instruction tuning dataset with extensive diversity. Science China Information Sciences, 67(12), 2024. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 593, + 554, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 593, + 554, + 635 + ], + "spans": [ + { + "bbox": [ + 317, + 593, + 554, + 635 + ], + "type": "text", + "content": "[24] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. MMBench: Is your multi-modal model an all-around player? In ECCV, 2024. 3, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 636, + 554, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 636, + 554, + 690 + ], + "spans": [ + { + "bbox": [ + 317, + 636, + 554, + 690 + ], + "type": "text", + "content": "[25] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. 
OCRBench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 2024. 3, 5, 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 691, + 554, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 691, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 316, + 691, + 554, + 714 + ], + "type": "text", + "content": "[26] Ziyu Liu, Tao Chu, Yuhang Zang, Xilin Wei, Xiaoyi Dong, Pan Zhang, Zijian Liang, Yuanjun Xiong, Yu Qiao, Dahua" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 76, + 72, + 296, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 296, + 106 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 296, + 106 + ], + "type": "text", + "content": "Lin, et al. MMDU: A multi-turn multi-image dialog understanding benchmark and instruction-tuning dataset for lvlms. In NeurIPS Datasets and Benchmarks Track, 2024. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 106, + 296, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 106, + 296, + 138 + ], + "spans": [ + { + "bbox": [ + 56, + 106, + 296, + 138 + ], + "type": "text", + "content": "[27] Renze Lou, Kai Zhang, and Wenpeng Yin. A comprehensive survey on instruction following. arXiv preprint arXiv:2303.10475, 2023. 
1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 140, + 296, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 296, + 183 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 296, + 183 + ], + "type": "text", + "content": "[28] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 184, + 296, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 184, + 296, + 237 + ], + "spans": [ + { + "bbox": [ + 56, + 184, + 296, + 237 + ], + "type": "text", + "content": "[29] Ziyang Luo, Can Xu, Pu Zhao, Qingfeng Sun, Xiubo Geng, Wenxiang Hu, Chongyang Tao, Jing Ma, Qingwei Lin, and Daxin Jiang. Wizardcoder: Empowering code large language models with evol-instruct. arXiv preprint arXiv:2306.08568, 2023. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 239, + 296, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 239, + 296, + 293 + ], + "spans": [ + { + "bbox": [ + 56, + 239, + 296, + 293 + ], + "type": "text", + "content": "[30] Yubo Ma, Yuhang Zang, Liangyu Chen, Meiqi Chen, Yizhu Jiao, Xinze Li, Xinyuan Lu, Ziyu Liu, Yan Ma, Xiaoyi Dong, et al. MMLongBench-Doc: Benchmarking long-context document understanding with visualizations. In NeurlPS Datasets and Benchmarks Track, 2024. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 294, + 296, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 294, + 296, + 337 + ], + "spans": [ + { + "bbox": [ + 56, + 294, + 296, + 337 + ], + "type": "text", + "content": "[31] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. 
arXiv preprint arXiv:2203.10244, 2022. 5, 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 338, + 296, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 338, + 296, + 358 + ], + "spans": [ + { + "bbox": [ + 56, + 338, + 296, + 358 + ], + "type": "text", + "content": "[32] OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023. Accessed: 2025-02-23. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 360, + 296, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 360, + 296, + 380 + ], + "spans": [ + { + "bbox": [ + 56, + 360, + 296, + 380 + ], + "type": "text", + "content": "[33] OpenAI. GPT-4V(ison) System Card. 2023. Accessed: 2025-02-23. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 383, + 296, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 383, + 296, + 425 + ], + "spans": [ + { + "bbox": [ + 56, + 383, + 296, + 425 + ], + "type": "text", + "content": "[34] Yusu Qian, Hanrong Ye, Jean-Philippe Fauconnier, Peter Grasch, Yinfei Yang, and Zhe Gan. MIA-Bench: Towards better instruction following evaluation of multimodal llms. In ICLR, 2025. 1, 2, 3, 5, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 426, + 296, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 426, + 296, + 480 + ], + "spans": [ + { + "bbox": [ + 56, + 426, + 296, + 480 + ], + "type": "text", + "content": "[35] Yiwei Qin, Kaiqiang Song, Yebowen Hu, Wenlin Yao, Sangwoo Cho, Xiaoyang Wang, Xuansheng Wu, Fei Liu, Pengfei Liu, and Dong Yu. InFoBench: Evaluating instruction following ability in large language models. arXiv preprint arXiv:2401.03601, 2024. 
1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 482, + 296, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 482, + 296, + 535 + ], + "spans": [ + { + "bbox": [ + 56, + 482, + 296, + 535 + ], + "type": "text", + "content": "[36] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 2, 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 537, + 296, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 537, + 296, + 601 + ], + "spans": [ + { + "bbox": [ + 56, + 537, + 296, + 601 + ], + "type": "text", + "content": "[37] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, 2018. 4, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 603, + 296, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 603, + 296, + 657 + ], + "spans": [ + { + "bbox": [ + 56, + 603, + 296, + 657 + ], + "type": "text", + "content": "[38] Lucy Xiaoyang Shi, Brian Ichter, Michael Equi, Liyiming Ke, Karl Pertsch, Quan Vuong, James Tanner, Anna Walling, Haohuan Wang, Niccolo Fusai, et al. Hi Robot: Open-ended instruction following with hierarchical vision-language-action models. arXiv preprint arXiv:2502.19417, 2025. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 658, + 296, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 658, + 296, + 690 + ], + "spans": [ + { + "bbox": [ + 56, + 658, + 296, + 690 + ], + "type": "text", + "content": "[39] Dingjie Song, Shunian Chen, Guiming Hardy Chen, Fei Yu, Xiang Wan, and Benyou Wang. Milebench: Benchmarking mllms in long context, 2024. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 692, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 692, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 692, + 296, + 713 + ], + "type": "text", + "content": "[40] Fei Wang, Xingyu Fu, James Y. Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou," + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 333, + 72, + 555, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 72, + 555, + 128 + ], + "spans": [ + { + "bbox": [ + 333, + 72, + 555, + 128 + ], + "type": "text", + "content": "Kai Zhang, Tianyi Lorena Yan, Wenjie Jacky Mo, Hsiang-Hui Liu, Pan Lu, Chunyuan Li, Chaowei Xiao, Kai-Wei Chang, Dan Roth, Sheng Zhang, Hoifung Poon, and Muhao Chen. Muirbench: A comprehensive benchmark for robust multi-image understanding, 2024. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 129, + 555, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 555, + 183 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 555, + 183 + ], + "type": "text", + "content": "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-VL: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 
7, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 185, + 555, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 185, + 555, + 239 + ], + "spans": [ + { + "bbox": [ + 316, + 185, + 555, + 239 + ], + "type": "text", + "content": "[42] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, et al. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 241, + 555, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 241, + 555, + 295 + ], + "spans": [ + { + "bbox": [ + 316, + 241, + 555, + 295 + ], + "type": "text", + "content": "[43] Xilin Wei, Xiaoran Liu, Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Jian Tong, Haodong Duan, Qipeng Guo, Jiaqi Wang, et al. Videorope: What makes for good video rotary position embedding? arXiv preprint arXiv:2502.05173, 2025. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 297, + 555, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 297, + 555, + 341 + ], + "spans": [ + { + "bbox": [ + 316, + 297, + 555, + 341 + ], + "type": "text", + "content": "[44] Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, and Daxin Jiang. Wizardlm: Empowering large language models to follow complex instructions. arXiv preprint arXiv:2304.12244, 2023. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 342, + 555, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 342, + 555, + 373 + ], + "spans": [ + { + "bbox": [ + 316, + 342, + 555, + 373 + ], + "type": "text", + "content": "[45] Zhiyang Xu, Ying Shen, and Lifu Huang. Multiinstruct: Improving multi-modal zero-shot learning via instruction tuning, 2023. 
3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 376, + 555, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 376, + 555, + 419 + ], + "spans": [ + { + "bbox": [ + 316, + 376, + 555, + 419 + ], + "type": "text", + "content": "[46] Zhiyang Xu, Chao Feng, Rulin Shao, Trevor Ashby, Ying Shen, Di Jin, Yu Cheng, Qifan Wang, and Lifu Huang. Visionplan: Scaling human-labeled tasks in visual instruction tuning, 2024. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 422, + 555, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 422, + 555, + 465 + ], + "spans": [ + { + "bbox": [ + 316, + 422, + 555, + 465 + ], + "type": "text", + "content": "[47] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. MiniCPM-V: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 467, + 555, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 467, + 555, + 544 + ], + "spans": [ + { + "bbox": [ + 316, + 467, + 555, + 544 + ], + "type": "text", + "content": "[48] Kaining Ying, Fanqing Meng, Jin Wang, Zhiqian Li, Han Lin, Yue Yang, Hao Zhang, Wenbo Zhang, Yuqi Lin, Shuo Liu, Jiayi Lei, Quanfeng Lu, Runjian Chen, Peng Xu, Renrui Zhang, Haozhe Zhang, Peng Gao, Yali Wang, Yu Qiao, Ping Luo, Kaipeng Zhang, and Wenqi Shao. Mmt-bench: A comprehensive multimodal benchmark for evaluating large vision-language models towards multitask agi, 2024. 
3, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 545, + 555, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 545, + 555, + 589 + ], + "spans": [ + { + "bbox": [ + 316, + 545, + 555, + 589 + ], + "type": "text", + "content": "[49] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. MM-Vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 590, + 555, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 590, + 555, + 644 + ], + "spans": [ + { + "bbox": [ + 316, + 590, + 555, + 644 + ], + "type": "text", + "content": "[50] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expertagi. In CVPR, 2024.3,7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 647, + 555, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 647, + 555, + 700 + ], + "spans": [ + { + "bbox": [ + 316, + 647, + 555, + 700 + ], + "type": "text", + "content": "[51] Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Ziyu Liu, Shengyuan Ding, Shenxi Wu, Yubo Ma, Haodong Duan, Wenwei Zhang, et al. Internlm-xcomposer2. 5-reward: A simple yet effective multi-modal reward model. arXiv preprint arXiv:2501.12368, 2025. 
3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 702, + 555, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 702, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 702, + 555, + 713 + ], + "type": "text", + "content": "[52] Yuhang Zang, Wei Li, Jun Han, Kaiyang Zhou, and" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 296, + 384 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 76, + 72, + 296, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 296, + 95 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 296, + 95 + ], + "type": "text", + "content": "Chen Change Loy. Contextual object detection with multimodal large language models. IJCV, 2025. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 296, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 296, + 150 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 296, + 150 + ], + "type": "text", + "content": "[53] Tao Zhang, Yanjun Shen, Wenjing Luo, Yan Zhang, Hao Liang, Fan Yang, Mingan Lin, Yujing Qiao, Weipeng Chen, Bin Cui, et al. CFBench: A comprehensive constraints-following benchmark for llms. arXiv preprint arXiv:2408.01122, 2024. 
1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 152, + 295, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 295, + 185 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 295, + 185 + ], + "type": "text", + "content": "[54] Xinghua Zhang, Haiyang Yu, Cheng Fu, Fei Huang, and Yongbin Li. Iopo: Empowering llms with complex instruction following via input-output preference optimization, 2024. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 186, + 296, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 186, + 296, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 186, + 296, + 239 + ], + "type": "text", + "content": "[55] Xiangyu Zhao, Shengyuan Ding, Zicheng Zhang, Haian Huang, Maosong Cao, Weiyun Wang, Jiaqi Wang, Xinyu Fang, Wenhai Wang, Guangtao Zhai, et al. Omnialign-v: Towards enhanced alignment of mllms with human preference. arXiv preprint arXiv:2502.18411, 2025. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 241, + 295, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 241, + 295, + 296 + ], + "spans": [ + { + "bbox": [ + 56, + 241, + 295, + 296 + ], + "type": "text", + "content": "[56] Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. In NeurIPS Datasets and Benchmarks Track, 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 297, + 296, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 297, + 296, + 342 + ], + "spans": [ + { + "bbox": [ + 56, + 297, + 296, + 342 + ], + "type": "text", + "content": "[57] Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. Instruction-following evaluation for large language models. arXiv preprint arXiv:2311.07911, 2023. 
1, 2, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 342, + 296, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 342, + 296, + 384 + ], + "spans": [ + { + "bbox": [ + 56, + 342, + 296, + 384 + ], + "type": "text", + "content": "[58] Wangchunshu Zhou, Yuchen Eleanor Jiang, Ethan Wilcox, Ryan Cotterell, and Mrinmaya Sachan. Controlled text generation with natural language instructions. In ICML, 2023. 2" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 122, + 68, + 489, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 68, + 489, + 110 + ], + "spans": [ + { + "bbox": [ + 122, + 68, + 489, + 110 + ], + "type": "text", + "content": "MM-IFEngine: Towards Multimodal Instruction Following Supplementary Material" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 123, + 135, + 136 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 123, + 135, + 136 + ], + "spans": [ + { + "bbox": [ + 56, + 123, + 135, + 136 + ], + "type": "text", + "content": "A. MM-IFEval" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 144, + 288, + 157 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 144, + 288, + 157 + ], + "spans": [ + { + "bbox": [ + 55, + 144, + 288, + 157 + ], + "type": "text", + "content": "A.1. 
An overview of Constraints and Instructions" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 163, + 135, + 175 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 163, + 135, + 175 + ], + "spans": [ + { + "bbox": [ + 56, + 163, + 135, + 175 + ], + "type": "text", + "content": "A.1.1. Constraints" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 179, + 294, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 179, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 179, + 294, + 251 + ], + "type": "text", + "content": "Based on daily use cases and existing research, we have identified six main categories of constraints, which can be further divided into 32 specific constraint types shown in Fig. 5. In this section, we introduce and exemplify these six major constraint categories. For detailed descriptions and examples of all 32 subcategories, please refer to Table 5." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 253, + 295, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 253, + 295, + 408 + ], + "spans": [ + { + "bbox": [ + 55, + 253, + 295, + 408 + ], + "type": "text", + "content": "Text Length Requirements. In this category, we focus on the length of the response, including the number of paragraphs, sentences, and words. We also consider the length of the response in the aspect of poetry or \"Use yes or no to answer the question\". It must be noted that we do not require the model to follow the strict requirement in exact numbers like \"The response must be exactly 56 words\". The constraints we propose in this category are based on reality, with precise numerical requirements only at the sentence or paragraph level, and of moderate size; the rest of the constraints are used to limit by ranges like \"The response must be between 100 and 150 words\", which aligns with the task that people tend to encounter in real-world scenarios." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 410, + 295, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 410, + 295, + 505 + ], + "spans": [ + { + "bbox": [ + 55, + 410, + 295, + 505 + ], + "type": "text", + "content": "Mathematical Requirements. This category includes constraints related to the most common part of answering mathematical problems like precision, scientific notation, and other mathematical requirements. For example, \"Keep two decimal places for the number in the answer\", \"Please round up all the numbers in the answer\", or \"Don't include specific numbers in your answers. Compare numbers with their relative sizes\"." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 507, + 295, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 507, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 55, + 507, + 295, + 590 + ], + "type": "text", + "content": "Language & Formatting Requirements. This category includes constraints related to the language and formatting of the response, such as answering in a specific language, using a specific format like JSON, or using a specific style like poetry. Requirements for tense, writing style, numbering, list, and other language-related or formatting-related aspects are also included in this category." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 592, + 295, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 592, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 55, + 592, + 295, + 675 + ], + "type": "text", + "content": "Rhetoric & Logic Requirements. \"Rhetoric\" refers to the art of using language to persuade or influence, while \"Logic\" refers to the principles of reasoning and argumentation. 
This category includes constraints related to the rhetoric and logic of the response, such as the use of metaphor, simple, cause-and-effect relationship, conditional statement, and other rhetoric and logic-related aspects." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 677, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 295, + 713 + ], + "type": "text", + "content": "Action Requirements. \"Action\" refers to the action that the model should take like a human. We define this category as the constraints that require the model to perform a specific" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 125, + 553, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 125, + 553, + 184 + ], + "spans": [ + { + "bbox": [ + 313, + 125, + 553, + 184 + ], + "type": "text", + "content": "action, such as tone, role imitation, use specific prefix or suffix, or acting like under some specific situation. We hope this category can help us to evaluate the ability of the model to follow instructions and perform actions in more complex and realistic scenarios." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 186, + 555, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 186, + 555, + 281 + ], + "spans": [ + { + "bbox": [ + 313, + 186, + 555, + 281 + ], + "type": "text", + "content": "Keyword Requirements. \"Keyword\" refers to the specific words or phrases that the model should include or avoid in the response. This category includes constraints related to the response keyword, such as the use of specific keywords, the avoidance of specific keywords, or the variation of specific keywords. 
For example, \"Use at least three synonyms for 'innovation,' such as 'breakthrough,' 'new approach,' or 'invention,' spread throughout your text.\"" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 293, + 419, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 293, + 419, + 303 + ], + "spans": [ + { + "bbox": [ + 313, + 293, + 419, + 303 + ], + "type": "text", + "content": "A.1.2. Instruction Tasks" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 308, + 554, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 308, + 554, + 403 + ], + "spans": [ + { + "bbox": [ + 313, + 308, + 554, + 403 + ], + "type": "text", + "content": "For source datasets lacking original task instructions, we constructed a diverse task pool containing 18 instructions that encourage open-ended responses from models. These instructions can be categorized into five task types: Descriptive Analysis, Emotional & Perspective, Creative Writing, Social Media & Content, and Roleplay. The classification information and examples of the instructions are shown in Table 6." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 416, + 460, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 416, + 460, + 428 + ], + "spans": [ + { + "bbox": [ + 313, + 416, + 460, + 428 + ], + "type": "text", + "content": "A.2. 
Perception-level Problems" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 388, + 449, + 481, + 542 + ], + "blocks": [ + { + "bbox": [ + 388, + 449, + 481, + 542 + ], + "lines": [ + { + "bbox": [ + 388, + 449, + 481, + 542 + ], + "spans": [ + { + "bbox": [ + 388, + 449, + 481, + 542 + ], + "type": "image", + "image_path": "26f2af004dd7f5937db26a4a9177826d59f32c69e865ec08b9eeb5358d9181b2.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 552, + 555, + 608 + ], + "lines": [ + { + "bbox": [ + 313, + 552, + 555, + 608 + ], + "spans": [ + { + "bbox": [ + 313, + 552, + 555, + 608 + ], + "type": "text", + "content": "Figure 6. Image Source Distribution in perception-level problems.Perception-level problems in MM-IFEval presents a systematic categorization of 100 challenging vision-based instructionfollowing tasks, organized into 13 distinct classes according to image content characteristics and task complexity." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "text", + "content": "Perception-level problems in MM-IFEval comprise 100 carefully crafted questions with strong image-constraint correlations. The images can be categorized into 13 information-rich and complex domains shown in Figure 6. Figures 10, 11, 12, and 13 present representative examples from the web interface, diagram, poster, and visual difference categories, respectively, demonstrating the diverse visual challenges incorporated in our benchmark." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 307, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 307, + 740 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 307, + 740 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 70, + 504, + 206 + ], + "blocks": [ + { + "bbox": [ + 111, + 70, + 504, + 206 + ], + "lines": [ + { + "bbox": [ + 111, + 70, + 504, + 206 + ], + "spans": [ + { + "bbox": [ + 111, + 70, + 504, + 206 + ], + "type": "image", + "image_path": "6e3680bdcc5cb2321579d781eb8c241e9d5302be72393bbcf07887d1d980df2c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 216, + 555, + 240 + ], + "lines": [ + { + "bbox": [ + 54, + 216, + 555, + 240 + ], + "spans": [ + { + "bbox": [ + 54, + 216, + 555, + 240 + ], + "type": "text", + "content": "Figure 5. Demonstration of constraints categories. We designed 6 main categories for all the constraints used, with a total of 32 subcategories" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 253, + 149, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 253, + 149, + 266 + ], + "spans": [ + { + "bbox": [ + 55, + 253, + 149, + 266 + ], + "type": "text", + "content": "B. Image Sources" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 273, + 296, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 273, + 296, + 321 + ], + "spans": [ + { + "bbox": [ + 55, + 273, + 296, + 321 + ], + "type": "text", + "content": "The quality of the image source is crucial for the performance of the model. 
Except of this, the diversity of the image source is also important to fully utilize or evaluate the ability of the model. We use the following image source:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 322, + 296, + 597 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 55, + 322, + 296, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 322, + 296, + 380 + ], + "spans": [ + { + "bbox": [ + 55, + 322, + 296, + 380 + ], + "type": "text", + "content": "- Natural Scene: The natural scene is the most common image source, which is most used in the real-world like the image of a beautiful landscape, a busy street, or a crowded cafe. In this part, we sample images from CC3M[37] and ALLaVA[3]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 381, + 296, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 381, + 296, + 464 + ], + "spans": [ + { + "bbox": [ + 55, + 381, + 296, + 464 + ], + "type": "text", + "content": "- UI Interface: The UI interface is the image from the UI interface of the website and mobile application. It is crucial because it represents a significant portion of real-world multimodal interactions where users need to understand and interact with digital interfaces. We collected diverse mobile app UI images from the RICO[9] dataset and web UI images from the MultiUI[22] dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 465, + 296, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 465, + 296, + 525 + ], + "spans": [ + { + "bbox": [ + 55, + 465, + 296, + 525 + ], + "type": "text", + "content": "- Diagram & Chart: The diagram and chart are the image that contains some specific information like the data, the relationship between the data, or the change of the data. We collect diagram and chart images from ChartQA[31] dataset, which contains diverse diagram and chart images." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 525, + 296, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 525, + 296, + 597 + ], + "spans": [ + { + "bbox": [ + 55, + 525, + 296, + 597 + ], + "type": "text", + "content": "- **Mathematic:** The math problem is the image that contains a math problem, which is a common task in the real-world like the problem of the math, the solution of the math problem, or the calculation of the math problem. We collect math problem images from Geo170k[12] dataset, which contains diverse geometry problem images." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 609, + 241, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 609, + 241, + 624 + ], + "spans": [ + { + "bbox": [ + 55, + 609, + 241, + 624 + ], + "type": "text", + "content": "C. MM-IFEngine Prompt Template" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 629, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 296, + 714 + ], + "type": "text", + "content": "MM-IFEngine provides a scalable pipeline for mass-producing instruction-following datasets for multimodal large language models, functioning effectively regardless of whether source datasets contain original instructions. This engine enables systematic augmentation of existing visual datasets with diverse instruction-following tasks. 
Figures 14 and 15 demonstrate representative prompt templates from" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 254, + 555, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 254, + 555, + 302 + ], + "spans": [ + { + "bbox": [ + 313, + 254, + 555, + 302 + ], + "type": "text", + "content": "MM-IFEngine's two core components: the instruction generation module and the constraint integration module, respectively, illustrating the methodology behind our automated data construction process." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 319, + 542, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 542, + 333 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 542, + 333 + ], + "type": "text", + "content": "D. MM-IFInstruct and MM-IFDPO Dataset" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 342, + 556, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 556, + 485 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 556, + 485 + ], + "type": "text", + "content": "Our MM-IFInstruct dataset integrates three distinct data sources: CC3M (without original instructions), ALLaVA (with pre-existing questions), and a diversity collection composed of MultiUI, ChartQA, and Geo170k. To create the MM-IFDPO dataset for preference optimization, we randomly removed " + }, + { + "bbox": [ + 313, + 342, + 556, + 485 + ], + "type": "inline_equation", + "content": "33\\%" + }, + { + "bbox": [ + 313, + 342, + 556, + 485 + ], + "type": "text", + "content": " of constraints from the MM-IFInstruct samples to generate rejected examples. Figures 16, 17, and 18 illustrate representative samples derived from CC3M, ALLaVA, and our diversity collection, respectively, while Figure 19 demonstrates an example pair from the MM-IFDPO dataset showing both preferred and rejected instructions." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 504, + 388, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 504, + 388, + 516 + ], + "spans": [ + { + "bbox": [ + 314, + 504, + 388, + 516 + ], + "type": "text", + "content": "E. Evaluation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 525, + 391, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 525, + 391, + 537 + ], + "spans": [ + { + "bbox": [ + 314, + 525, + 391, + 537 + ], + "type": "text", + "content": "E.1. Rule-based" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 545, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 545, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 556, + 713 + ], + "type": "text", + "content": "We identified 10 constraint subcategories from our taxonomy of 32 that could be algorithmically verified. For these selected constraints, we developed specialized verification functions with targeted parameters. For efficiency, we employed large language models to analyze each constraint specification, select the most appropriate verification function, and extract the necessary parameters. All selections were subsequently validated through manual review to ensure the accuracy and quality of both the function selection and their parameters. The prompt template used for function selection and parameter extraction is illustrated in Figure 20, while Table 7 provides a comprehensive overview of all verification functions with their corresponding parameter examples." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 58, + 72, + 192, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 72, + 192, + 85 + ], + "spans": [ + { + "bbox": [ + 58, + 72, + 192, + 85 + ], + "type": "text", + "content": "E.2. Compare Judge Method" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 89, + 295, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 89, + 295, + 232 + ], + "spans": [ + { + "bbox": [ + 57, + 89, + 295, + 232 + ], + "type": "text", + "content": "Recent works[11, 28] have shown that GPT-4o has the ability to compare two responses from models. For constraint types lacking objective evaluation metrics (such as tone requirements or role imitation), we implemented a comparative assessment method. This approach requires the model under evaluation to generate two responses: one adhering to the target constraint and another without the constraint. A judge model then analyzes both outputs to determine whether significant differences exist between them, thereby more accurately assessing whether the model has successfully followed these subjective constraints. Figure 21 illustrates the prompt used in this comparative evaluation process." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 239, + 178, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 239, + 178, + 252 + ], + "spans": [ + { + "bbox": [ + 58, + 239, + 178, + 252 + ], + "type": "text", + "content": "E.3. 
Direct Judge Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 257, + 295, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 257, + 295, + 293 + ], + "spans": [ + { + "bbox": [ + 57, + 257, + 295, + 293 + ], + "type": "text", + "content": "The Direct Judge method provides the constraint and answer of the model under test directly to the Judge model, and its prompt template is shown in Figure 22." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 81, + 91, + 200, + 202 + ], + "blocks": [ + { + "bbox": [ + 81, + 91, + 200, + 202 + ], + "lines": [ + { + "bbox": [ + 81, + 91, + 200, + 202 + ], + "spans": [ + { + "bbox": [ + 81, + 91, + 200, + 202 + ], + "type": "image", + "image_path": "da1c12581fdf4703106d135a0c117b8491e0a5df760368394ef31e1010533d9d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 272, + 82, + 301, + 111 + ], + "blocks": [ + { + "bbox": [ + 272, + 82, + 301, + 111 + ], + "lines": [ + { + "bbox": [ + 272, + 82, + 301, + 111 + ], + "spans": [ + { + "bbox": [ + 272, + 82, + 301, + 111 + ], + "type": "image", + "image_path": "0484a0868ae93c6822a2e37ad27487a45d7da1c18d389d48bfe3030a757e0da2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 309, + 88, + 398, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 88, + 398, + 105 + ], + "spans": [ + { + "bbox": [ + 309, + 88, + 398, + 105 + ], + "type": "text", + "content": 
"Instruction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 317, + 123, + 517, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 123, + 517, + 180 + ], + "spans": [ + { + "bbox": [ + 317, + 123, + 517, + 180 + ], + "type": "text", + "content": "What might have led to the dog's behavior as depicted in this image?" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 82, + 214, + 108, + 243 + ], + "blocks": [ + { + "bbox": [ + 82, + 214, + 108, + 243 + ], + "lines": [ + { + "bbox": [ + 82, + 214, + 108, + 243 + ], + "spans": [ + { + "bbox": [ + 82, + 214, + 108, + 243 + ], + "type": "image", + "image_path": "f85c35d98213bdd15cfb04c35d6896020e317749ab4d75ff2de2cf0f5000a581.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 116, + 218, + 210, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 218, + 210, + 235 + ], + "spans": [ + { + "bbox": [ + 116, + 218, + 210, + 235 + ], + "type": "text", + "content": "Constraints" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 81, + 248, + 493, + 354 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 81, + 248, + 336, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 248, + 336, + 260 + ], + "spans": [ + { + "bbox": [ + 81, + 248, + 336, + 260 + ], + "type": "text", + "content": "1.target Audience requirement: Your audience is a dog lover." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 81, + 261, + 463, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 261, + 463, + 271 + ], + "spans": [ + { + "bbox": [ + 81, + 261, + 463, + 271 + ], + "type": "text", + "content": "2.tense所需要的: Use present tense in the first paragraph and past tense in the second." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 81, + 272, + 423, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 272, + 423, + 283 + ], + "spans": [ + { + "bbox": [ + 81, + 272, + 423, + 283 + ], + "type": "text", + "content": "3.tone Requirement: Adopt a reassuring, empathetic tone as if consoling someone." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 81, + 284, + 411, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 284, + 411, + 295 + ], + "spans": [ + { + "bbox": [ + 81, + 284, + 411, + 295 + ], + "type": "text", + "content": "4.paragraph_number_limit: Your response must consist of exactly 3 paragraphs." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 81, + 296, + 407, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 296, + 407, + 307 + ], + "spans": [ + { + "bbox": [ + 81, + 296, + 407, + 307 + ], + "type": "text", + "content": "5.mention: Mention the term 'sorry' at least twice throughout your description." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 81, + 308, + 493, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 308, + 493, + 331 + ], + "spans": [ + { + "bbox": [ + 81, + 308, + 493, + 331 + ], + "type": "text", + "content": "6highlight所需要的: Use bold for the first occurrence of the term 'aggressive behavior' in each paragraph." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 81, + 331, + 429, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 331, + 429, + 342 + ], + "spans": [ + { + "bbox": [ + 81, + 331, + 429, + 342 + ], + "type": "text", + "content": "7wrap_up Requirement: Provide a final paragraph summarizing the key arguments." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 81, + 343, + 403, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 343, + 403, + 354 + ], + "spans": [ + { + "bbox": [ + 81, + 343, + 403, + 354 + ], + "type": "text", + "content": "8. perspective Requirement: Please answer the question in the second person." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 78, + 410, + 259, + 537 + ], + "blocks": [ + { + "bbox": [ + 78, + 410, + 259, + 537 + ], + "lines": [ + { + "bbox": [ + 78, + 410, + 259, + 537 + ], + "spans": [ + { + "bbox": [ + 78, + 410, + 259, + 537 + ], + "type": "image", + "image_path": "5620c0d5370927e54a310681de1128ea2aee9ec77460e5d08a808103fb653489.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 705, + 508, + 717 + ], + "lines": [ + { + "bbox": [ + 100, + 705, + 508, + 717 + ], + "spans": [ + { + "bbox": [ + 100, + 705, + 508, + 717 + ], + "type": "text", + "content": "Figure 8. A compose-level problem example from the MM-IFEval benchmark in the chart image category." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 280, + 408, + 308, + 437 + ], + "blocks": [ + { + "bbox": [ + 96, + 380, + 512, + 392 + ], + "lines": [ + { + "bbox": [ + 96, + 380, + 512, + 392 + ], + "spans": [ + { + "bbox": [ + 96, + 380, + 512, + 392 + ], + "type": "text", + "content": "Figure 7. A compose-level problem example from the MM-IFEval benchmark in the general image category." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 280, + 408, + 308, + 437 + ], + "lines": [ + { + "bbox": [ + 280, + 408, + 308, + 437 + ], + "spans": [ + { + "bbox": [ + 280, + 408, + 308, + 437 + ], + "type": "image", + "image_path": "907f08c10d6e79f5c38cd4e49277e1c617858900c1802711181bddafab3df592.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 415, + 401, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 415, + 401, + 430 + ], + "spans": [ + { + "bbox": [ + 315, + 415, + 401, + 430 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 324, + 451, + 532, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 451, + 532, + 510 + ], + "spans": [ + { + "bbox": [ + 324, + 451, + 532, + 510 + ], + "type": "text", + "content": "Which region has the highest value of apple production? Give the answer, and analyze the reasons for the large yield of apples in this area." 
+ } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 86, + 545, + 111, + 574 + ], + "blocks": [ + { + "bbox": [ + 86, + 545, + 111, + 574 + ], + "lines": [ + { + "bbox": [ + 86, + 545, + 111, + 574 + ], + "spans": [ + { + "bbox": [ + 86, + 545, + 111, + 574 + ], + "type": "image", + "image_path": "0153a9628e2e3eccb3237466825dbb02299faf3591c64102e7c65a7d3636acfa.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 124, + 552, + 216, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 552, + 216, + 567 + ], + "spans": [ + { + "bbox": [ + 124, + 552, + 216, + 567 + ], + "type": "text", + "content": "Constraints" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 81, + 586, + 518, + 673 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 81, + 586, + 382, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 586, + 382, + 599 + ], + "spans": [ + { + "bbox": [ + 81, + 586, + 382, + 599 + ], + "type": "text", + "content": "1. precision: In the answer, plot the output in the same unit." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 81, + 600, + 466, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 600, + 466, + 613 + ], + "spans": [ + { + "bbox": [ + 81, + 600, + 466, + 613 + ], + "type": "text", + "content": "2.title所需要的: Provide a concise title that summarizes the main idea." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 81, + 614, + 487, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 614, + 487, + 643 + ], + "spans": [ + { + "bbox": [ + 81, + 614, + 487, + 643 + ], + "type": "text", + "content": "3. perspective Requirement: Give your answer from the perspective of a Mexican agricultural expert." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 81, + 643, + 518, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 643, + 518, + 658 + ], + "spans": [ + { + "bbox": [ + 81, + 643, + 518, + 658 + ], + "type": "text", + "content": "4.sentence_number_limit: Each paragraph should contain between 3 and 5 sentences." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 81, + 658, + 464, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 658, + 464, + 673 + ], + "spans": [ + { + "bbox": [ + 81, + 658, + 464, + 673 + ], + "type": "text", + "content": "5. unstrict_formatting REQUIREments: Number the reasons for your analysis." + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 99, + 98, + 242, + 202 + ], + "blocks": [ + { + "bbox": [ + 99, + 98, + 242, + 202 + ], + "lines": [ + { + "bbox": [ + 99, + 98, + 242, + 202 + ], + "spans": [ + { + "bbox": [ + 99, + 98, + 242, + 202 + ], + "type": "image", + "image_path": "6c1982e3cc10acf54c4994c66a37f7e8b2fa35ce0ebb3bd38f43899e361eddf4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 286, + 91, + 313, + 119 + ], + "blocks": [ + { + "bbox": [ + 286, + 91, + 313, + 119 + ], + "lines": [ + { + "bbox": [ + 286, + 91, + 313, + 119 + ], + "spans": [ + { + "bbox": [ + 286, + 91, + 313, + 119 + ], + "type": "image", + "image_path": "63652bdb4d2e32f585e27f8e52a240c787ad167c8c9de70c5880bade271d2159.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 99, + 404, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 99, + 404, + 113 + ], + "spans": [ + { + "bbox": [ + 320, + 99, + 404, + 113 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 325, + 122, + 529, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 122, + 529, + 206 + ], + "spans": [ + { + "bbox": [ + 325, + 122, + 529, + 206 + ], + "type": "text", + "content": "In triangle ABC, D is the midpoint of BC, E is the midpoint of AD, and F is the midpoint of CE. Given that the area of triangle ABC is 28 square centimeters, consider the impact of these midpoints on the subdivisions of the triangle. Analyze how these midpoints affect the areas of triangles within triangle ABC and provide a detailed explanation to find the area of the shaded region that is formed within triangle BEC and triangle AEC. Finally, deduce and conclude which part of the interior triangles contribute to the shaded area." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 83, + 213, + 108, + 240 + ], + "blocks": [ + { + "bbox": [ + 83, + 213, + 108, + 240 + ], + "lines": [ + { + "bbox": [ + 83, + 213, + 108, + 240 + ], + "spans": [ + { + "bbox": [ + 83, + 213, + 108, + 240 + ], + "type": "image", + "image_path": "d43f9962e8b33f145891928a51d28b11929c9878a53feea2d7123d86ff22bd5e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 116, + 217, + 205, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 217, + 205, + 232 + ], + "spans": [ + { + "bbox": [ + 116, + 217, + 205, + 232 + ], + "type": "text", + "content": "Constraints" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 251, + 512, + 341 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 83, + 251, + 498, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 251, + 498, + 263 + ], + "spans": [ + { + "bbox": [ + 83, + 251, + 498, + 263 + ], + "type": "text", + "content": "1.target Audience requirement: Write your answer for a liberal arts student. You're tutoring her in math." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 263, + 386, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 263, + 386, + 274 + ], + "spans": [ + { + "bbox": [ + 83, + 263, + 386, + 274 + ], + "type": "text", + "content": "2(word_count_range_limit: Please write between 150 and 200 words in total." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 274, + 399, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 274, + 399, + 285 + ], + "spans": [ + { + "bbox": [ + 83, + 274, + 399, + 285 + ], + "type": "text", + "content": "3.paragraph_number_limit: Your response must consist of exactly 4 paragraphs." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 286, + 421, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 286, + 421, + 296 + ], + "spans": [ + { + "bbox": [ + 83, + 286, + 421, + 296 + ], + "type": "text", + "content": "4.sentence_number_limit: Each paragraph should contain between 3 and 5 sentences." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 297, + 429, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 297, + 429, + 307 + ], + "spans": [ + { + "bbox": [ + 83, + 297, + 429, + 307 + ], + "type": "text", + "content": "5.not Mention: Please do not mention the words 'formula' or 'equation' in your answer." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 308, + 437, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 308, + 437, + 319 + ], + "spans": [ + { + "bbox": [ + 83, + 308, + 437, + 319 + ], + "type": "text", + "content": "6.mention: Mention the word 'midpoint' at least three times throughout your description." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 82, + 319, + 512, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 319, + 512, + 341 + ], + "spans": [ + { + "bbox": [ + 82, + 319, + 512, + 341 + ], + "type": "text", + "content": "7.tone Requirement: Write your answer in a positive and encouraging tone, emphasizing the simplicity of the geometric concepts involved." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 88, + 468, + 100, + 479 + ], + "blocks": [ + { + "bbox": [ + 88, + 468, + 100, + 479 + ], + "lines": [ + { + "bbox": [ + 88, + 468, + 100, + 479 + ], + "spans": [ + { + "bbox": [ + 88, + 468, + 100, + 479 + ], + "type": "image", + "image_path": "589cf867a22182e068246d12e82f9c8b33fe2623ec671b4a6092aff851dc7eff.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 695, + 499, + 708 + ], + "lines": [ + { + "bbox": [ + 110, + 695, + 499, + 708 + ], + "spans": [ + { + "bbox": [ + 110, + 695, + 499, + 708 + ], + "type": "text", + "content": "Figure 10. A perception-level problem example from the MM-IFEval benchmark in the web category." + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 101, + 468, + 116, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 468, + 116, + 478 + ], + "spans": [ + { + "bbox": [ + 101, + 468, + 116, + 478 + ], + "type": "text", + "content": "熱門" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 89, + 485, + 105, + 502 + ], + "blocks": [ + { + "bbox": [ + 89, + 485, + 105, + 502 + ], + "lines": [ + { + "bbox": [ + 89, + 485, + 105, + 502 + ], + "spans": [ + { + "bbox": [ + 89, + 485, + 105, + 502 + ], + "type": "image", + "image_path": "4f69995458ad65d6273503c3f1405a1ecd4111c77cb6d19c22e8f067983a182f.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 487, + 134, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 487, + 134, + 502 + ], + "spans": [ + { + "bbox": [ + 107, + 487, + 134, + 502 + ], + "type": "text", + "content": "BITCOIN BTC" + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 91, + 509, + 104, + 525 + ], + "blocks": [ + { + "bbox": [ + 
91, + 509, + 104, + 525 + ], + "lines": [ + { + "bbox": [ + 91, + 509, + 104, + 525 + ], + "spans": [ + { + "bbox": [ + 91, + 509, + 104, + 525 + ], + "type": "image", + "image_path": "7acea6432fb9022b5a33535d982ef3fcc5554b2dcf8c5fcd664b83946350ee55.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 510, + 142, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 510, + 142, + 524 + ], + "spans": [ + { + "bbox": [ + 107, + 510, + 142, + 524 + ], + "type": "text", + "content": "ETHEREUM ETH" + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 89, + 531, + 105, + 548 + ], + "blocks": [ + { + "bbox": [ + 89, + 531, + 105, + 548 + ], + "lines": [ + { + "bbox": [ + 89, + 531, + 105, + 548 + ], + "spans": [ + { + "bbox": [ + 89, + 531, + 105, + 548 + ], + "type": "image", + "image_path": "2ee1295d0e9b5bc3e465acbf481149629ae85cfd7d1c7d0a05b741fd31c7e9b1.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 107, + 533, + 143, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 533, + 143, + 548 + ], + "spans": [ + { + "bbox": [ + 107, + 533, + 143, + 548 + ], + "type": "text", + "content": "TETHER U... 
USDT" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 89, + 554, + 106, + 571 + ], + "blocks": [ + { + "bbox": [ + 89, + 554, + 106, + 571 + ], + "lines": [ + { + "bbox": [ + 89, + 554, + 106, + 571 + ], + "spans": [ + { + "bbox": [ + 89, + 554, + 106, + 571 + ], + "type": "image", + "image_path": "b3c9eac6671fe2b8119bc68e7872593103ecc8f2f6902e4917136ee6093c119a.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 107, + 555, + 126, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 555, + 126, + 570 + ], + "spans": [ + { + "bbox": [ + 107, + 555, + 126, + 570 + ], + "type": "text", + "content": "USDC USDC" + } + ] + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 89, + 578, + 106, + 594 + ], + "blocks": [ + { + "bbox": [ + 89, + 578, + 106, + 594 + ], + "lines": [ + { + "bbox": [ + 89, + 578, + 106, + 594 + ], + "spans": [ + { + "bbox": [ + 89, + 578, + 106, + 594 + ], + "type": "image", + "image_path": "6297fcacd3408db5e5079c1fa07b36dd45523b97c45725cff4d124471ce24fc5.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "bbox": [ + 107, + 578, + 121, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 578, + 121, + 593 + ], + "spans": [ + { + "bbox": [ + 107, + 578, + 121, + 593 + ], + "type": "text", + "content": "BNB BNB" + } + ] + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 89, + 600, + 106, + 616 + ], + "blocks": [ + { + "bbox": [ + 89, + 600, + 106, + 616 + ], + "lines": [ + { + "bbox": [ + 89, + 600, + 106, + 616 + ], + "spans": [ + { + "bbox": [ + 89, + 600, + 106, + 616 + ], + "type": "image", + "image_path": "f621fd1a958e226377c6ec8dd2d58c6be49f6c28ae8e82474a52930ee148192d.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "bbox": [ + 107, + 601, + 124, + 615 + ], 
+ "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 601, + 124, + 615 + ], + "spans": [ + { + "bbox": [ + 107, + 601, + 124, + 615 + ], + "type": "text", + "content": "BUSD BUSD" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 158, + 486, + 212, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 486, + 212, + 502 + ], + "spans": [ + { + "bbox": [ + 158, + 486, + 212, + 502 + ], + "type": "text", + "content": "3,156,526.95 " + }, + { + "bbox": [ + 158, + 486, + 212, + 502 + ], + "type": "inline_equation", + "content": "0.76\\%" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 167, + 510, + 197, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 510, + 197, + 525 + ], + "spans": [ + { + "bbox": [ + 167, + 510, + 197, + 525 + ], + "type": "text", + "content": "86,060.91-2.64%" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 178, + 533, + 196, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 533, + 196, + 548 + ], + "spans": [ + { + "bbox": [ + 178, + 533, + 196, + 548 + ], + "type": "text", + "content": "32.83-0.03%" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 177, + 555, + 196, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 555, + 196, + 571 + ], + "spans": [ + { + "bbox": [ + 177, + 555, + 196, + 571 + ], + "type": "text", + "content": "32.83 -0.01%" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 167, + 578, + 197, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 578, + 197, + 594 + ], + "spans": [ + { + "bbox": [ + 167, + 578, + 197, + 594 + ], + "type": "text", + "content": "19,024.08+0.47%" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 178, + 601, + 196, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 601, + 196, + 613 + ], + "spans": [ + { + "bbox": [ + 178, + 601, + 196, + 613 + ], + "type": "text", + "content": 
"32.890.08%" + } + ] + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 201, + 487, + 212, + 499 + ], + "blocks": [ + { + "bbox": [ + 201, + 487, + 212, + 499 + ], + "lines": [ + { + "bbox": [ + 201, + 487, + 212, + 499 + ], + "spans": [ + { + "bbox": [ + 201, + 487, + 212, + 499 + ], + "type": "image", + "image_path": "ec10135043baf7b83bf204b85b6f92a302788debf4131724f2feac72291fb4ab.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 202, + 510, + 212, + 521 + ], + "blocks": [ + { + "bbox": [ + 202, + 510, + 212, + 521 + ], + "lines": [ + { + "bbox": [ + 202, + 510, + 212, + 521 + ], + "spans": [ + { + "bbox": [ + 202, + 510, + 212, + 521 + ], + "type": "image", + "image_path": "7dc04617a68fbd011d6e2e59205e61d13f5fe74159dcab09d05f33af5f01fd78.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 274, + 428, + 302, + 458 + ], + "blocks": [ + { + "bbox": [ + 93, + 372, + 515, + 384 + ], + "lines": [ + { + "bbox": [ + 93, + 372, + 515, + 384 + ], + "spans": [ + { + "bbox": [ + 93, + 372, + 515, + 384 + ], + "type": "text", + "content": "Figure 9. A compose-level problem example from the MM-IFEval benchmark in the geometry image category." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 274, + 428, + 302, + 458 + ], + "lines": [ + { + "bbox": [ + 274, + 428, + 302, + 458 + ], + "spans": [ + { + "bbox": [ + 274, + 428, + 302, + 458 + ], + "type": "image", + "image_path": "37403248a25ce74f0fee9561af337d76b40215268fb8a2db12df7ab41da09904.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "bbox": [ + 309, + 436, + 397, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 436, + 397, + 452 + ], + "spans": [ + { + "bbox": [ + 309, + 436, + 397, + 452 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 313, + 464, + 526, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 464, + 526, + 526 + ], + "spans": [ + { + "bbox": [ + 313, + 464, + 526, + 526 + ], + "type": "text", + "content": "If someone just bought the orange currency for " + }, + { + "bbox": [ + 313, + 464, + 526, + 526 + ], + "type": "inline_equation", + "content": "12,000 and the blue currency for" + }, + { + "bbox": [ + 313, + 464, + 526, + 526 + ], + "type": "text", + "content": "15,000, what is the total amount of money they have now, based on the current currency situation? Round off the decimal part of the answer." 
+ } + ] + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 272, + 552, + 302, + 580 + ], + "blocks": [ + { + "bbox": [ + 272, + 552, + 302, + 580 + ], + "lines": [ + { + "bbox": [ + 272, + 552, + 302, + 580 + ], + "spans": [ + { + "bbox": [ + 272, + 552, + 302, + 580 + ], + "type": "image", + "image_path": "0a174d1a8487ae270fa9531eb282b12b3bdf1ebe750de4487d240fbd9bec8c31.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "bbox": [ + 309, + 556, + 422, + 573 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 556, + 422, + 573 + ], + "spans": [ + { + "bbox": [ + 309, + 556, + 422, + 573 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 399, + 611, + 444, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 611, + 444, + 626 + ], + "spans": [ + { + "bbox": [ + 399, + 611, + 444, + 626 + ], + "type": "text", + "content": "26907" + } + ] + } + ], + "index": 42 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 44 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 121, + 239, + 325 + ], + "blocks": [ + { + "bbox": [ + 76, + 121, + 239, + 325 + ], + "lines": [ + { + "bbox": [ + 76, + 121, + 239, + 325 + ], + "spans": [ + { + "bbox": [ + 76, + 121, + 239, + 325 + ], + "type": "image", + "image_path": "93fae1c89257852bfbbe9a0a5b659cc8ae540c6562a41118c4f8fcb933ee185c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 101, + 372, + 508, + 384 + ], + "lines": [ + { + "bbox": [ + 101, + 372, + 508, + 384 + ], + "spans": [ + { + 
"bbox": [ + 101, + 372, + 508, + 384 + ], + "type": "text", + "content": "Figure 11. A perception-level problem example from the MM-IFEval benchmark in the diagram category." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 279, + 111, + 307, + 140 + ], + "blocks": [ + { + "bbox": [ + 279, + 111, + 307, + 140 + ], + "lines": [ + { + "bbox": [ + 279, + 111, + 307, + 140 + ], + "spans": [ + { + "bbox": [ + 279, + 111, + 307, + 140 + ], + "type": "image", + "image_path": "ede6d71a7e6a40c3e16bae59a649dada191f0d2c2d2d6a06b20011e6e74d99a2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 314, + 118, + 400, + 134 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 118, + 400, + 134 + ], + "spans": [ + { + "bbox": [ + 314, + 118, + 400, + 134 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 319, + 145, + 526, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 145, + 526, + 202 + ], + "spans": [ + { + "bbox": [ + 319, + 145, + 526, + 202 + ], + "type": "text", + "content": "In this flowchart, which node is reached after the first condition encountered from Start is judged to be Yes? Preserve the case of node names." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 279, + 232, + 308, + 260 + ], + "blocks": [ + { + "bbox": [ + 279, + 232, + 308, + 260 + ], + "lines": [ + { + "bbox": [ + 279, + 232, + 308, + 260 + ], + "spans": [ + { + "bbox": [ + 279, + 232, + 308, + 260 + ], + "type": "image", + "image_path": "f11a13a7625ccb9fecf6ab27dca9dc1ff2f4e8e98d758891aaf3acc5e1041833.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 314, + 236, + 424, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 236, + 424, + 253 + ], + "spans": [ + { + "bbox": [ + 314, + 236, + 424, + 253 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 410, + 290, + 439, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 290, + 439, + 305 + ], + "spans": [ + { + "bbox": [ + 410, + 290, + 439, + 305 + ], + "type": "text", + "content": "End" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 66, + 497, + 267, + 615 + ], + "blocks": [ + { + "bbox": [ + 66, + 497, + 267, + 615 + ], + "lines": [ + { + "bbox": [ + 66, + 497, + 267, + 615 + ], + "spans": [ + { + "bbox": [ + 66, + 497, + 267, + 615 + ], + "type": "image", + "image_path": "7b82cca0e2400d6ba62d0d7b0cc2be5197f5fe5e94ded82d13f2a8c8d879f3ba.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 688, + 504, + 700 + ], + "lines": [ + { + "bbox": [ + 105, + 688, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 504, + 700 + ], + "type": "text", + "content": "Figure 12. A perception-level problem example from the MM-IFEval benchmark in the poster category." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 280, + 445, + 307, + 472 + ], + "blocks": [ + { + "bbox": [ + 280, + 445, + 307, + 472 + ], + "lines": [ + { + "bbox": [ + 280, + 445, + 307, + 472 + ], + "spans": [ + { + "bbox": [ + 280, + 445, + 307, + 472 + ], + "type": "image", + "image_path": "3996516606fd064cfdbe3369919ead6f54bcf5d8193d9d747fd5af6365ced908.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 451, + 397, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 451, + 397, + 467 + ], + "spans": [ + { + "bbox": [ + 313, + 451, + 397, + 467 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 318, + 478, + 507, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 478, + 507, + 548 + ], + "spans": [ + { + "bbox": [ + 318, + 478, + 507, + 548 + ], + "type": "text", + "content": "Observe the alphabet represented by white dots and line segments in the figure. Starting from 'A', what is the second letter composed of eight white dots? Output this letter in uppercase." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 280, + 561, + 308, + 588 + ], + "blocks": [ + { + "bbox": [ + 280, + 561, + 308, + 588 + ], + "lines": [ + { + "bbox": [ + 280, + 561, + 308, + 588 + ], + "spans": [ + { + "bbox": [ + 280, + 561, + 308, + 588 + ], + "type": "image", + "image_path": "65870911467b0cb4597e559aeff59872e5723c320fec13889cba633658b9431a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 566, + 421, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 566, + 421, + 582 + ], + "spans": [ + { + "bbox": [ + 313, + 566, + 421, + 582 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 415, + 618, + 426, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 618, + 426, + 632 + ], + "spans": [ + { + "bbox": [ + 415, + 618, + 426, + 632 + ], + "type": "text", + "content": "G" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 91, + 246, + 202 + ], + "blocks": [ + { + "bbox": [ + 76, + 91, + 246, + 202 + ], + "lines": [ + { + "bbox": [ + 76, + 91, + 246, + 202 + ], + "spans": [ + { + "bbox": [ + 76, + 91, + 246, + 202 + ], + "type": "image", + "image_path": "23657ebb87830fb6b12c608aa56b6a82daeeb3ab35f058f6a82db3d59dd9cf69.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 284, + 89, + 310, + 117 + ], + "blocks": [ + { + "bbox": [ + 284, + 89, + 310, 
+ 117 + ], + "lines": [ + { + "bbox": [ + 284, + 89, + 310, + 117 + ], + "spans": [ + { + "bbox": [ + 284, + 89, + 310, + 117 + ], + "type": "image", + "image_path": "5d9b2a3cb57fe2986ad599fb1fc44d097fab64fa3f38ba4e5cececff287ec4ec.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 708, + 503, + 719 + ], + "lines": [ + { + "bbox": [ + 105, + 708, + 503, + 719 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 503, + 719 + ], + "type": "text", + "content": "Figure 14. Prompt template for image generation instructions using a large language model in MM-IFEngine." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 318, + 96, + 400, + 112 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 96, + 400, + 112 + ], + "spans": [ + { + "bbox": [ + 318, + 96, + 400, + 112 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 322, + 123, + 521, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 123, + 521, + 183 + ], + "spans": [ + { + "bbox": [ + 322, + 123, + 521, + 183 + ], + "type": "text", + "content": "Sam and Tom used the red box and Tom used the blue box. They each gave three answers. Would you please judge which of the two boys found more differences? Print the name of the winning boy directly." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 76, + 205, + 246, + 315 + ], + "blocks": [ + { + "bbox": [ + 76, + 205, + 246, + 315 + ], + "lines": [ + { + "bbox": [ + 76, + 205, + 246, + 315 + ], + "spans": [ + { + "bbox": [ + 76, + 205, + 246, + 315 + ], + "type": "image", + "image_path": "7fc02214dfd29a49639cbccdd247b22267c5b82662bcdcc0f061ab82dc9c141f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 83, + 345, + 525, + 357 + ], + "lines": [ + { + "bbox": [ + 83, + 345, + 525, + 357 + ], + "spans": [ + { + "bbox": [ + 83, + 345, + 525, + 357 + ], + "type": "text", + "content": "Figure 13. A perception-level problem example from the MM-IFEval benchmark in the finding difference category." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 283, + 207, + 311, + 235 + ], + "blocks": [ + { + "bbox": [ + 283, + 207, + 311, + 235 + ], + "lines": [ + { + "bbox": [ + 283, + 207, + 311, + 235 + ], + "spans": [ + { + "bbox": [ + 283, + 207, + 311, + 235 + ], + "type": "image", + "image_path": "2bb7b33d20c7c67877fcd6b28c2171338819d36b4cf30b1ff80ca925a85f94b5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 318, + 211, + 425, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 211, + 425, + 228 + ], + "spans": [ + { + "bbox": [ + 318, + 211, + 425, + 228 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 410, + 264, + 440, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 264, + 440, + 278 + ], + "spans": [ + { + "bbox": [ + 410, + 264, + 440, + 278 + ], + "type": "text", + "content": "Tom" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 91, + 372, + 317, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 
372, + 317, + 389 + ], + "spans": [ + { + "bbox": [ + 91, + 372, + 317, + 389 + ], + "type": "text", + "content": "Instruction generation prompt" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 81, + 403, + 440, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 403, + 440, + 418 + ], + "spans": [ + { + "bbox": [ + 81, + 403, + 440, + 418 + ], + "type": "text", + "content": "You are an expert in generating concise instructions for images." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 435, + 130, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 435, + 130, + 448 + ], + "spans": [ + { + "bbox": [ + 83, + 435, + 130, + 448 + ], + "type": "text", + "content": "## Task" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 83, + 451, + 507, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 451, + 507, + 497 + ], + "spans": [ + { + "bbox": [ + 83, + 451, + 507, + 497 + ], + "type": "text", + "content": "Given the image, generate a list of appropriate instructions for it. Your instructions should not be too long or overly detailed, and they should not include any specific details about the image." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 83, + 499, + 475, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 499, + 475, + 545 + ], + "spans": [ + { + "bbox": [ + 83, + 499, + 475, + 545 + ], + "type": "text", + "content": "On one hand, you can choose appropriate instructions cases for the provided image from the Examples and modify them naturally for the image." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 83, + 547, + 507, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 547, + 507, + 578 + ], + "spans": [ + { + "bbox": [ + 83, + 547, + 507, + 578 + ], + "type": "text", + "content": "On the other hand, you can generate new instructions, but only if these new instructions are relevant and appropriate for the image." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 83, + 594, + 159, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 594, + 159, + 609 + ], + "spans": [ + { + "bbox": [ + 83, + 594, + 159, + 609 + ], + "type": "text", + "content": "Examples" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 83, + 610, + 230, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 610, + 230, + 625 + ], + "spans": [ + { + "bbox": [ + 83, + 610, + 230, + 625 + ], + "type": "text", + "content": "{original instructions list}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 83, + 641, + 380, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 641, + 380, + 656 + ], + "spans": [ + { + "bbox": [ + 83, + 641, + 380, + 656 + ], + "type": "text", + "content": "You output format should be in the following format:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 83, + 658, + 173, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 658, + 173, + 673 + ], + "spans": [ + { + "bbox": [ + 83, + 658, + 173, + 673 + ], + "type": "text", + "content": "{output format}" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + 
"para_blocks": [ + { + "bbox": [ + 91, + 228, + 318, + 246 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 228, + 318, + 246 + ], + "spans": [ + { + "bbox": [ + 91, + 228, + 318, + 246 + ], + "type": "text", + "content": "Constraint integration prompt" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 88, + 253, + 405, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 253, + 405, + 265 + ], + "spans": [ + { + "bbox": [ + 88, + 253, + 405, + 265 + ], + "type": "text", + "content": "You are an expert in add appropriate constraints to the instruction for images." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 88, + 276, + 124, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 276, + 124, + 285 + ], + "spans": [ + { + "bbox": [ + 88, + 276, + 124, + 285 + ], + "type": "text", + "content": "Task" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 287, + 474, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 287, + 474, + 299 + ], + "spans": [ + { + "bbox": [ + 86, + 287, + 474, + 299 + ], + "type": "text", + "content": "Given the original instruction, your task is to expand the instruction by adding constraints to it." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 86, + 299, + 532, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 299, + 532, + 321 + ], + "spans": [ + { + "bbox": [ + 86, + 299, + 532, + 321 + ], + "type": "text", + "content": "You can select **as many as possible** appropriate types of constraints from the given **Constraints List** below and modify them. 
However, ensure that the constraints you generate meet the following requirements:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 88, + 322, + 384, + 355 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 88, + 322, + 344, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 322, + 344, + 332 + ], + "spans": [ + { + "bbox": [ + 88, + 322, + 344, + 332 + ], + "type": "text", + "content": "1. Maintain the thematic consistency of the original instruction." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 88, + 333, + 384, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 333, + 384, + 344 + ], + "spans": [ + { + "bbox": [ + 88, + 333, + 384, + 344 + ], + "type": "text", + "content": "2.Be relevant and appropriate for the original instruction and be concise." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 88, + 344, + 353, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 344, + 353, + 355 + ], + "spans": [ + { + "bbox": [ + 88, + 344, + 353, + 355 + ], + "type": "text", + "content": "3. Do not conflict with the original instruction or with each other." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 86, + 366, + 526, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 366, + 526, + 424 + ], + "spans": [ + { + "bbox": [ + 86, + 366, + 526, + 424 + ], + "type": "text", + "content": "For example, if the original instruction is a simple query like \"What color is the truck?\" you should avoid adding constraints such as \"Your response must be in three paragraphs,\" as such a requirement is unnecessary for a short and simple question. 
Moreover, if the original instruction is a question like \"What is the object in the image?\", you should avoid adding constraints such as \"Respond in the second-person to directly address the reader,\" as it conflicts with the original instruction." + } + ] + } + ], + "index": 9 + }, + { + "type": "code", + "bbox": [ + 88, + 435, + 184, + 459 + ], + "blocks": [ + { + "bbox": [ + 88, + 435, + 184, + 459 + ], + "lines": [ + { + "bbox": [ + 88, + 435, + 184, + 459 + ], + "spans": [ + { + "bbox": [ + 88, + 435, + 184, + 459 + ], + "type": "text", + "content": "Original Instruction {originalInstruction}" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_body" + } + ], + "index": 10, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 88, + 469, + 317, + 505 + ], + "blocks": [ + { + "bbox": [ + 88, + 469, + 317, + 505 + ], + "lines": [ + { + "bbox": [ + 88, + 469, + 317, + 505 + ], + "spans": [ + { + "bbox": [ + 88, + 469, + 317, + 505 + ], + "type": "text", + "content": "## Constraints List\nYour added constraints can be from the following types:\n{constraints_list_str}" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "erb" + }, + { + "type": "code", + "bbox": [ + 88, + 514, + 350, + 539 + ], + "blocks": [ + { + "bbox": [ + 88, + 514, + 350, + 539 + ], + "lines": [ + { + "bbox": [ + 88, + 514, + 350, + 539 + ], + "spans": [ + { + "bbox": [ + 88, + 514, + 350, + 539 + ], + "type": "text", + "content": "Output Format Your output should follow the format below: {output format}" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 173, + 568, + 436, + 580 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 568, + 436, + 580 + ], + "spans": [ + { + "bbox": [ + 173, + 568, + 436, + 580 + ], + "type": "text", + "content": "Figure 15. 
prompt template for integrating constraints in MM-IFEngine." + } + ] + } + ], + "index": 13, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 102, + 218, + 258, + 305 + ], + "blocks": [ + { + "bbox": [ + 102, + 218, + 258, + 305 + ], + "lines": [ + { + "bbox": [ + 102, + 218, + 258, + 305 + ], + "spans": [ + { + "bbox": [ + 102, + 218, + 258, + 305 + ], + "type": "image", + "image_path": "ea7d98cf5d46a87b51dcd4991ba87ca475604f3db9d6d6ae7d4dcf55faa82418.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 308, + 148, + 319 + ], + "lines": [ + { + "bbox": [ + 100, + 308, + 148, + 319 + ], + "spans": [ + { + "bbox": [ + 100, + 308, + 148, + 319 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 77, + 301, + 97, + 322 + ], + "blocks": [ + { + "bbox": [ + 77, + 301, + 97, + 322 + ], + "lines": [ + { + "bbox": [ + 77, + 301, + 97, + 322 + ], + "spans": [ + { + "bbox": [ + 77, + 301, + 97, + 322 + ], + "type": "image", + "image_path": "fa246e78d5d86cd9b166edad5795dce6faf8bcadbcdfe8b5d4f19f4f8d147359.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 97, + 330, + 271, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 330, + 271, + 353 + ], + "spans": [ + { + "bbox": [ + 97, + 330, + 271, + 353 + ], + "type": "text", + "content": "Write a short poem that captures the hustle and bustle of city life 
depicted in this image." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 70, + 373, + 88, + 394 + ], + "blocks": [ + { + "bbox": [ + 70, + 373, + 88, + 394 + ], + "lines": [ + { + "bbox": [ + 70, + 373, + 88, + 394 + ], + "spans": [ + { + "bbox": [ + 70, + 373, + 88, + 394 + ], + "type": "image", + "image_path": "7bf578d20e9149264d8eeba49452b97b318a01bb095a6f55866f3d420ba12a00.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 161, + 564, + 448, + 576 + ], + "lines": [ + { + "bbox": [ + 161, + 564, + 448, + 576 + ], + "spans": [ + { + "bbox": [ + 161, + 564, + 448, + 576 + ], + "type": "text", + "content": "Figure 16. A sample constructed by MM-IFEngine pipeline from cc3m dataset" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 376, + 148, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 376, + 148, + 387 + ], + "spans": [ + { + "bbox": [ + 96, + 376, + 148, + 387 + ], + "type": "text", + "content": "Constraints" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 95, + 399, + 265, + 542 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 95, + 399, + 255, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 399, + 255, + 424 + ], + "spans": [ + { + "bbox": [ + 95, + 399, + 255, + 424 + ], + "type": "text", + "content": "1. Write each stanza so it includes a parenthetical phrase." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 96, + 426, + 250, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 426, + 250, + 437 + ], + "spans": [ + { + "bbox": [ + 96, + 426, + 250, + 437 + ], + "type": "text", + "content": "2.Each stanza should have 4 lines." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 438, + 216, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 438, + 216, + 450 + ], + "spans": [ + { + "bbox": [ + 96, + 438, + 216, + 450 + ], + "type": "text", + "content": "3.Use bold for every noun." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 96, + 452, + 265, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 452, + 265, + 489 + ], + "spans": [ + { + "bbox": [ + 96, + 452, + 265, + 489 + ], + "type": "text", + "content": "4.Start the poem with 'In the heart of the city...' and end with '...the city never sleeps.'" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 96, + 491, + 258, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 491, + 258, + 515 + ], + "spans": [ + { + "bbox": [ + 96, + 491, + 258, + 515 + ], + "type": "text", + "content": "5.Use present tense throughout the poem." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 96, + 517, + 265, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 517, + 265, + 542 + ], + "spans": [ + { + "bbox": [ + 96, + 517, + 265, + 542 + ], + "type": "text", + "content": "6.Use the third-person perspective to maintain an objective viewpoint." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 295, + 232, + 325, + 255 + ], + "blocks": [ + { + "bbox": [ + 295, + 232, + 325, + 255 + ], + "lines": [ + { + "bbox": [ + 295, + 232, + 325, + 255 + ], + "spans": [ + { + "bbox": [ + 295, + 232, + 325, + 255 + ], + "type": "image", + "image_path": "b27cab99c2ed7531296bfc443c19e4215ad0b0f4f75480717906d223b26bd8b5.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 326, + 242, + 361, + 251 + ], + "lines": [ + { + "bbox": [ + 326, + 242, + 361, + 251 + ], + "spans": [ + { + "bbox": [ + 326, + 242, + 361, + 251 + ], + "type": "text", + "content": "Answer" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 281, + 528, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 281, + 528, + 301 + ], + "spans": [ + { + "bbox": [ + 309, + 281, + 528, + 301 + ], + "type": "text", + "content": "In the heart of the city, where the \\*\\*streets\\*\\* hum with life." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 304, + 479, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 304, + 479, + 314 + ], + "spans": [ + { + "bbox": [ + 309, + 304, + 479, + 314 + ], + "type": "text", + "content": "People rush (beneath the bright blue sky)," + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 309, + 315, + 492, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 315, + 492, + 326 + ], + "spans": [ + { + "bbox": [ + 309, + 315, + 492, + 326 + ], + "type": "text", + "content": "Buses pass (\\*\\*tires\\*\\* whispering on the road)," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 309, + 326, + 460, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 326, + 460, + 337 + ], + "spans": [ + { + "bbox": [ + 309, + 326, + 460, + 337 + ], + "type": "text", + "content": "Every moment feels like a lively show." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 348, + 477, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 348, + 477, + 358 + ], + "spans": [ + { + "bbox": [ + 309, + 348, + 477, + 358 + ], + "type": "text", + "content": "The \\*\\*signs\\*\\* stand tall amidst the crowd," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 359, + 494, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 359, + 494, + 370 + ], + "spans": [ + { + "bbox": [ + 309, + 359, + 494, + 370 + ], + "type": "text", + "content": "An ambulance speeds by (with urgent sound)," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 309, + 371, + 460, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 371, + 460, + 381 + ], + "spans": [ + { + "bbox": [ + 309, + 371, + 460, + 381 + ], + "type": "text", + "content": "Newspapers rest (in their quiet stack)," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 309, + 382, + 488, + 392 + ], + "type": "text", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 382, + 488, + 392 + ], + "spans": [ + { + "bbox": [ + 309, + 382, + 488, + 392 + ], + "type": "text", + "content": "As footsteps weave through the urban track." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 309, + 403, + 447, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 403, + 447, + 415 + ], + "spans": [ + { + "bbox": [ + 309, + 403, + 447, + 415 + ], + "type": "text", + "content": "Buildings rise, their facades grand," + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 309, + 415, + 450, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 415, + 450, + 426 + ], + "spans": [ + { + "bbox": [ + 309, + 415, + 450, + 426 + ], + "type": "text", + "content": "A man walks past (across the land)," + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 309, + 426, + 441, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 426, + 441, + 437 + ], + "spans": [ + { + "bbox": [ + 309, + 426, + 441, + 437 + ], + "type": "text", + "content": "Voices blend into ambient noise," + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 309, + 437, + 432, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 437, + 432, + 449 + ], + "spans": [ + { + "bbox": [ + 309, + 437, + 432, + 449 + ], + "type": "text", + "content": "Life unfolds without any poise." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 309, + 460, + 455, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 460, + 455, + 471 + ], + "spans": [ + { + "bbox": [ + 309, + 460, + 455, + 471 + ], + "type": "text", + "content": "Shoppers carry bags (full of delight)," + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 309, + 472, + 481, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 472, + 481, + 482 + ], + "spans": [ + { + "bbox": [ + 309, + 472, + 481, + 482 + ], + "type": "text", + "content": "Sunlight dances (on the pavement's sight)," + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 309, + 483, + 445, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 483, + 445, + 494 + ], + "spans": [ + { + "bbox": [ + 309, + 483, + 445, + 494 + ], + "type": "text", + "content": "Each face tells tales of daily grind," + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 309, + 495, + 509, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 495, + 509, + 505 + ], + "spans": [ + { + "bbox": [ + 309, + 495, + 509, + 505 + ], + "type": "text", + "content": "Yet spirits soar, unconfined....the city never sleeps." 
+ } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 95, + 206, + 234, + 311 + ], + "blocks": [ + { + "bbox": [ + 95, + 206, + 234, + 311 + ], + "lines": [ + { + "bbox": [ + 95, + 206, + 234, + 311 + ], + "spans": [ + { + "bbox": [ + 95, + 206, + 234, + 311 + ], + "type": "image", + "image_path": "bb4d03c21b188b7e6e948b05b0228250d985286a8306dc23cf84eb15360ad44c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 96, + 313, + 144, + 323 + ], + "lines": [ + { + "bbox": [ + 96, + 313, + 144, + 323 + ], + "spans": [ + { + "bbox": [ + 96, + 313, + 144, + 323 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 94, + 342, + 236, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 342, + 236, + 353 + ], + "spans": [ + { + "bbox": [ + 94, + 342, + 236, + 353 + ], + "type": "text", + "content": "What treat can be baked in an oven?" 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 66, + 371, + 86, + 393 + ], + "blocks": [ + { + "bbox": [ + 66, + 371, + 86, + 393 + ], + "lines": [ + { + "bbox": [ + 66, + 371, + 86, + 393 + ], + "spans": [ + { + "bbox": [ + 66, + 371, + 86, + 393 + ], + "type": "image", + "image_path": "bc9dded61ccb3724cf7534e635cf47797e703693d01074c8691e6062ff4b1053.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 585, + 450, + 597 + ], + "lines": [ + { + "bbox": [ + 159, + 585, + 450, + 597 + ], + "spans": [ + { + "bbox": [ + 159, + 585, + 450, + 597 + ], + "type": "text", + "content": "Figure 17. A sample constructed by MM-IFEngine pipeline from Allava dataset" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 93, + 374, + 144, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 374, + 144, + 384 + ], + "spans": [ + { + "bbox": [ + 93, + 374, + 144, + 384 + ], + "type": "text", + "content": "Constraints" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 91, + 392, + 260, + 559 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 91, + 392, + 255, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 392, + 255, + 426 + ], + "spans": [ + { + "bbox": [ + 91, + 392, + 255, + 426 + ], + "type": "text", + "content": "1. Write your answer for a general adult audience with limited baking experience, providing clear and simple explanations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 91, + 426, + 248, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 426, + 248, + 459 + ], + "spans": [ + { + "bbox": [ + 91, + 426, + 248, + 459 + ], + "type": "text", + "content": "2. Write your answer in a positive and encouraging tone, making the baking process seem accessible and enjoyable." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 91, + 459, + 260, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 459, + 260, + 493 + ], + "spans": [ + { + "bbox": [ + 91, + 459, + 260, + 493 + ], + "type": "text", + "content": "3. Mention both 'oven' and 'baking' at least once in your response, ensuring they are used naturally and contextually." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 91, + 493, + 251, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 493, + 251, + 525 + ], + "spans": [ + { + "bbox": [ + 91, + 493, + 251, + 525 + ], + "type": "text", + "content": "4. Begin every sentence with an exclamation mark to convey excitement about baking." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 91, + 526, + 257, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 526, + 257, + 559 + ], + "spans": [ + { + "bbox": [ + 91, + 526, + 257, + 559 + ], + "type": "text", + "content": "5. Your response must consist of exactly 3 paragraphs, each focusing on a different type of treat that can be baked in an oven." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 290, + 209, + 320, + 232 + ], + "blocks": [ + { + "bbox": [ + 290, + 209, + 320, + 232 + ], + "lines": [ + { + "bbox": [ + 290, + 209, + 320, + 232 + ], + "spans": [ + { + "bbox": [ + 290, + 209, + 320, + 232 + ], + "type": "image", + "image_path": "bdd9fcab21c588a102cee9f5b639a91bcdb87ca0f7f02f68691766d792155ca6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 321, + 217, + 355, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 217, + 355, + 227 + ], + "spans": [ + { + "bbox": [ + 321, + 217, + 355, + 227 + ], + "type": "text", + "content": "Answer" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 240, + 520, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 240, + 520, + 344 + ], + "spans": [ + { + "bbox": [ + 304, + 240, + 520, + 344 + ], + "type": "text", + "content": "!One delightful treat you can bake in an oven is cookies! Whether you prefer classic chocolate chip or something more adventurous like oatmeal raisin, baking cookies is a fun and rewarding activity. The aroma of freshly baked cookies wafting through your home will make everyone excited!" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 345, + 523, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 345, + 523, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 345, + 523, + 449 + ], + "type": "text", + "content": "! Another scrumptious option for oven baking is muffins! Muffins are perfect for breakfast or as a snack, and you can customize them with various ingredients such as blueberries, bananas, or even chocolate chips. Baking muffins allows you to experiment with flavors while enjoying the satisfaction of creating something delicious from scratch." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 449, + 522, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 522, + 554 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 522, + 554 + ], + "type": "text", + "content": "! Lastly, consider baking a cake! Cakes come in countless varieties, from simple vanilla to rich chocolate, and can be decorated to suit any occasion. Baking a cake not only provides a sweet treat but also enhances your skills in mixing, measuring, and decorating. So grab your apron and get ready to enjoy the magic of baking in your own kitchen!" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 130, + 204, + 205, + 285 + ], + "blocks": [ + { + "bbox": [ + 130, + 204, + 205, + 285 + ], + "lines": [ + { + "bbox": [ + 130, + 204, + 205, + 285 + ], + "spans": [ + { + "bbox": [ + 130, + 204, + 205, + 285 + ], + "type": "image", + "image_path": "ef4afe7e62cd13d79634e9a4ebc580c21f65027704dad58fa5b37a5c231d52d6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 156, + 586, + 454, + 597 + ], + "lines": [ + { + "bbox": [ + 156, + 586, + 454, + 597 + ], + "spans": [ + { + "bbox": [ + 156, + 586, + 454, + 597 + ], + "type": "text", + "content": "Figure 18. 
A sample constructed by MM-IFEngine pipeline from geo170k dataset" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 71, + 278, + 92, + 300 + ], + "blocks": [ + { + "bbox": [ + 71, + 278, + 92, + 300 + ], + "lines": [ + { + "bbox": [ + 71, + 278, + 92, + 300 + ], + "spans": [ + { + "bbox": [ + 71, + 278, + 92, + 300 + ], + "type": "image", + "image_path": "ca77698c74d5a156981b000cf49d67605324d4995510c0e9d28ed9e134c00878.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 94, + 287, + 143, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 287, + 143, + 296 + ], + "spans": [ + { + "bbox": [ + 94, + 287, + 143, + 296 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 301, + 265, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 301, + 265, + 390 + ], + "spans": [ + { + "bbox": [ + 93, + 301, + 265, + 390 + ], + "type": "text", + "content": "In the diagram, quadrilateral ABCD is inscribed in circle O with diameter BE and line AE connected to it. Given that the measure of angle BCD is twice the measure of angle BAD, and that angle DAE is equal to angle CAD, develop a comprehensive geometric proof to determine the degree measure of angle DAE." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 66, + 395, + 84, + 415 + ], + "blocks": [ + { + "bbox": [ + 66, + 395, + 84, + 415 + ], + "lines": [ + { + "bbox": [ + 66, + 395, + 84, + 415 + ], + "spans": [ + { + "bbox": [ + 66, + 395, + 84, + 415 + ], + "type": "image", + "image_path": "f6b38cfc3886ea51093755f3f7be41388885e1728923505cc87d1cdc62a2637a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 92, + 397, + 143, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 397, + 143, + 407 + ], + "spans": [ + { + "bbox": [ + 92, + 397, + 143, + 407 + ], + "type": "text", + "content": "Constraints" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 91, + 420, + 255, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 420, + 255, + 458 + ], + "spans": [ + { + "bbox": [ + 91, + 420, + 255, + 458 + ], + "type": "text", + "content": "1. Your response must consist of exactly 3 paragraphs, each focusing on a different aspect of the proof: the initial setup, the application of theorems, and the final calculation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 91, + 459, + 259, + 517 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 91, + 459, + 259, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 459, + 259, + 487 + ], + "spans": [ + { + "bbox": [ + 91, + 459, + 259, + 487 + ], + "type": "text", + "content": "2. Each paragraph should contain between 4 and 6 sentences, ensuring a balanced and detailed explanation." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 91, + 488, + 248, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 488, + 248, + 517 + ], + "spans": [ + { + "bbox": [ + 91, + 488, + 248, + 517 + ], + "type": "text", + "content": "3.The response must be between 120 and 150 words in total, maintaining clarity and conciseness." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 91, + 517, + 253, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 517, + 253, + 537 + ], + "spans": [ + { + "bbox": [ + 91, + 517, + 253, + 537 + ], + "type": "text", + "content": "4. Write your answer in a clear and methodical tone, suitable for a high school geometry class." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 91, + 537, + 258, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 537, + 258, + 567 + ], + "spans": [ + { + "bbox": [ + 91, + 537, + 258, + 567 + ], + "type": "text", + "content": "5.Provide a concise title that summarizes the main idea of the proof, such as 'Geometric Proof for Angle DAE'." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 291, + 210, + 321, + 233 + ], + "blocks": [ + { + "bbox": [ + 291, + 210, + 321, + 233 + ], + "lines": [ + { + "bbox": [ + 291, + 210, + 321, + 233 + ], + "spans": [ + { + "bbox": [ + 291, + 210, + 321, + 233 + ], + "type": "image", + "image_path": "5229e3f9be101f02820ef6c5312fbeb5ec600d9304a845c7c81adfecb6202d33.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 321, + 219, + 356, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 219, + 356, + 228 + ], + "spans": [ + { + "bbox": [ + 321, + 219, + 356, + 228 + ], + "type": "text", + "content": "Answer" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "spans": [ + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "text", + "content": "**Geometric Proof for Angle DAE** To begin our proof, we note that since quadrilateral ABCD is inscribed in circle O with BE as its diameter, angle ABE is a right angle (90 degrees) by the Inscribed Angle Theorem. This theorem states that an angle inscribed in a semicircle is always a right angle. Additionally, given that angle BCD is twice angle BAD, let's denote angle BAD as " + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "inline_equation", + "content": "\\backslash(x\\backslash)" + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "text", + "content": ". Therefore, angle BCD measures " + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "inline_equation", + "content": "\\backslash(2x\\backslash)" + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "inline_equation", + "content": "\\backslash" + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "text", + "content": "n\\NNext, applying the properties of cyclic quadrilaterals, we know that opposite angles sum up to 180 degrees. Thus, angle BAD plus angle BCD equals 180 degrees. Substituting the values, we get " + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "inline_equation", + "content": "\\backslash(x + 2x = 180^{\\wedge}\\backslash\\text{circ} \\backslash)" + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "text", + "content": ", leading to " + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "inline_equation", + "content": "\\backslash(3x = 180^{\\wedge}\\backslash\\text{circ} \\backslash)" + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "inline_equation", + "content": "\\backslash(x = 60^{\\wedge}\\backslash\\text{circ} \\backslash)" + }, + { + "bbox": [ + 306, + 248, + 526, + 552 + ], + "type": "text", + "content": ". Since angle DAE is equal to angle CAD, and considering triangle ADE where AD is common, angle DAE also measures 30 degrees because angle CAD is half of angle BAD due to symmetry and the given condition. Finally, calculating the exact measure of angle DAE involves recognizing that angle CAD is half of angle BAD, which was determined to be 60 degrees. Hence, angle CAD is 30 degrees. Consequently, angle DAE, being congruent to angle CAD, also measures 30 degrees. This completes our proof, demonstrating that angle DAE indeed measures 30 degrees based on the provided conditions and geometric principles applied throughout the process." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 85, + 79, + 218, + 167 + ], + "blocks": [ + { + "bbox": [ + 85, + 79, + 218, + 167 + ], + "lines": [ + { + "bbox": [ + 85, + 79, + 218, + 167 + ], + "spans": [ + { + "bbox": [ + 85, + 79, + 218, + 167 + ], + "type": "image", + "image_path": "d5913805078e9758b3b084021463a8066cf52a8f5c88938099c0eb7aedea580d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 339, + 504, + 350 + ], + "lines": [ + { + "bbox": [ + 105, + 339, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 504, + 350 + ], + "type": "text", + "content": "Figure 19. 
A DPO training set sample, where the rejected data is obtained by removing " + }, + { + "bbox": [ + 105, + 339, + 504, + 350 + ], + "type": "inline_equation", + "content": "33\\%" + }, + { + "bbox": [ + 105, + 339, + 504, + 350 + ], + "type": "text", + "content": " of the constraints" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 69, + 169, + 86, + 187 + ], + "blocks": [ + { + "bbox": [ + 69, + 169, + 86, + 187 + ], + "lines": [ + { + "bbox": [ + 69, + 169, + 86, + 187 + ], + "spans": [ + { + "bbox": [ + 69, + 169, + 86, + 187 + ], + "type": "image", + "image_path": "7345a749a10e1e6c186fa9f6730013ec59203937baa69a45f79edc7ecc517d25.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 706, + 482, + 718 + ], + "lines": [ + { + "bbox": [ + 127, + 706, + 482, + 718 + ], + "spans": [ + { + "bbox": [ + 127, + 706, + 482, + 718 + ], + "type": "text", + "content": "Figure 20. Prompt template for automated verification function selection and paramater extraction" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 91, + 177, + 122, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 177, + 122, + 186 + ], + "spans": [ + { + "bbox": [ + 91, + 177, + 122, + 186 + ], + "type": "text", + "content": "prompt" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 196, + 211, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 196, + 211, + 310 + ], + "spans": [ + { + "bbox": [ + 77, + 196, + 211, + 310 + ], + "type": "text", + "content": "What might the seabird be doing flying alone over the ocean? Respond in the third-person perspective to maintain an objective viewpoint. Write the answer in the form of a brief nature observation log entry. Each paragraph should contain between 3 and 5 sentences. 
Write your answer for a general adult audience with an interest in wildlife and nature. Conclude with a single sentence summarizing the likely behavior of the seabird." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 227, + 87, + 247, + 106 + ], + "blocks": [ + { + "bbox": [ + 227, + 87, + 247, + 106 + ], + "lines": [ + { + "bbox": [ + 227, + 87, + 247, + 106 + ], + "spans": [ + { + "bbox": [ + 227, + 87, + 247, + 106 + ], + "type": "image", + "image_path": "a7b47c86c0f216460b617263eee211805158176d0cb11d14401cc8c47056be8d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 250, + 93, + 276, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 93, + 276, + 102 + ], + "spans": [ + { + "bbox": [ + 250, + 93, + 276, + 102 + ], + "type": "text", + "content": "Chosen" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 288, + 92, + 337, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 288, + 92, + 337, + 103 + ], + "spans": [ + { + "bbox": [ + 288, + 92, + 337, + 103 + ], + "type": "text", + "content": "Score:5/5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 234, + 118, + 381, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 118, + 381, + 296 + ], + "spans": [ + { + "bbox": [ + 234, + 118, + 381, + 296 + ], + "type": "text", + "content": "In today's observation, a solitary seabird was noted gliding gracefully above the vast expanse of the ocean. The bird's wings were fully extended, indicating efficient use of air currents to conserve energy while searching its surroundings. Seabirds often fly alone when scouting for food sources such as schools of fish or marine organisms near the surface. This lone flight could also serve purposes like territorial patrol or simply enjoying favorable wind conditions. In conclusion, the seabird is most likely engaged in foraging activities." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 386, + 85, + 408, + 107 + ], + "blocks": [ + { + "bbox": [ + 386, + 85, + 408, + 107 + ], + "lines": [ + { + "bbox": [ + 386, + 85, + 408, + 107 + ], + "spans": [ + { + "bbox": [ + 386, + 85, + 408, + 107 + ], + "type": "image", + "image_path": "5ee38ac2a4bb6d15d7ef96ab376ffb7813519d07e792a274fa6475fea2d02656.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 411, + 91, + 444, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 411, + 91, + 444, + 101 + ], + "spans": [ + { + "bbox": [ + 411, + 91, + 444, + 101 + ], + "type": "text", + "content": "Rejected" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 451, + 91, + 500, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 451, + 91, + 500, + 102 + ], + "spans": [ + { + "bbox": [ + 451, + 91, + 500, + 102 + ], + "type": "text", + "content": "Score:3/5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 395, + 114, + 482, + 136 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 395, + 114, + 482, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 114, + 482, + 122 + ], + "spans": [ + { + "bbox": [ + 395, + 114, + 482, + 122 + ], + "type": "text", + "content": "**Nature Observation Log Entry**" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 395, + 123, + 452, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 123, + 452, + 129 + ], + "spans": [ + { + "bbox": [ + 395, + 123, + 452, + 129 + ], + "type": "text", + "content": "**Date:** [Insert Date]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 395, + 129, + 472, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 129, + 472, + 136 + ], + "spans": [ + { + "bbox": [ + 395, + 129, + 472, + 136 + ], + "type": "text", + "content": "**Location:** 
[Insert Location]" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 395, + 137, + 539, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 137, + 539, + 164 + ], + "spans": [ + { + "bbox": [ + 395, + 137, + 539, + 164 + ], + "type": "text", + "content": "**Observation:** A solitary seabird, possibly a seagull or pelican, is seen soaring gracefully above the vast expanse of the ocean. The bird's wings are spread wide, indicating it is likely gliding or flying at a steady pace." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 395, + 165, + 539, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 165, + 539, + 221 + ], + "spans": [ + { + "bbox": [ + 395, + 165, + 539, + 221 + ], + "type": "text", + "content": "The water below appears calm, with gentle ripples on its surface, suggesting a serene environment. The bird's position and posture suggest it may be searching for food, such as fish or crustaceans, that are abundant in the ocean's depths. The absence of other birds or marine life in the immediate vicinity could imply that this bird is either hunting alone or simply enjoying a solitary flight over the sea." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 395, + 222, + 536, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 222, + 536, + 243 + ], + "spans": [ + { + "bbox": [ + 395, + 222, + 536, + 243 + ], + "type": "text", + "content": "\\*\\*Analysis:\\*\\* Seabirds like seagulls and pelicans are known for their ability to glide and soar on the wind currents, conserving energy while searching for food." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 395, + 244, + 535, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 244, + 535, + 281 + ], + "spans": [ + { + "bbox": [ + 395, + 244, + 535, + 281 + ], + "type": "text", + "content": "The bird's choice of location, over the ocean, indicates its reliance on marine resources for sustenance. The calmness of the water suggests favorable weather conditions, which are essential for successful hunting and gliding. This observation provides insight into the key role that birds play in the formation of such environments." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 395, + 286, + 536, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 286, + 536, + 300 + ], + "spans": [ + { + "bbox": [ + 395, + 286, + 536, + 300 + ], + "type": "text", + "content": "highlighting their adaptability to oceanic environments and their role in the marine ecosystem." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 78, + 370, + 395, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 370, + 395, + 385 + ], + "spans": [ + { + "bbox": [ + 78, + 370, + 395, + 385 + ], + "type": "text", + "content": "Prompt to choose verified function and extract params" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 89, + 412, + 514, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 412, + 514, + 453 + ], + "spans": [ + { + "bbox": [ + 89, + 412, + 514, + 453 + ], + "type": "text", + "content": "Please analyze the following constraint and select the most appropriate function from the given list to verify this constraint. Then extract the required parameters for the verification function from the constraint." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 89, + 468, + 278, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 468, + 278, + 483 + ], + "spans": [ + { + "bbox": [ + 89, + 468, + 278, + 483 + ], + "type": "text", + "content": "Constraint content: {constraint value}" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 89, + 496, + 375, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 496, + 375, + 525 + ], + "spans": [ + { + "bbox": [ + 89, + 496, + 375, + 525 + ], + "type": "text", + "content": "Available verification functions: {all candidate validation function names and parameters}" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 89, + 537, + 342, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 537, + 342, + 551 + ], + "spans": [ + { + "bbox": [ + 89, + 537, + 342, + 551 + ], + "type": "text", + "content": "Please complete the analysis following these steps:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 91, + 552, + 161, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 552, + 161, + 563 + ], + "spans": [ + { + "bbox": [ + 91, + 552, + 161, + 563 + ], + "type": "text", + "content": "**Your task:**" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 89, + 566, + 520, + 620 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 89, + 566, + 520, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 566, + 520, + 593 + ], + "spans": [ + { + "bbox": [ + 89, + 566, + 520, + 593 + ], + "type": "text", + "content": "1. 
Select the most appropriate verification function from the above list (return empty if none is suitable)" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 89, + 594, + 469, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 594, + 469, + 620 + ], + "spans": [ + { + "bbox": [ + 89, + 594, + 469, + 620 + ], + "type": "text", + "content": "2. Extract the required parameters from the constraint based on the function description" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 89, + 635, + 358, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 635, + 358, + 663 + ], + "spans": [ + { + "bbox": [ + 89, + 635, + 358, + 663 + ], + "type": "text", + "content": "**Please return the result in JSON format as follows:** {output format}" + } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 97, + 232, + 276, + 250 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 232, + 276, + 250 + ], + "spans": [ + { + "bbox": [ + 97, + 232, + 276, + 250 + ], + "type": "text", + "content": "Compare Judge Prompt" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 88, + 265, + 515, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 265, + 515, + 331 + ], + "spans": [ + { + "bbox": [ + 88, + 265, + 515, + 331 + ], + "type": "text", + "content": "You are an expert in judging whether the response follows the given constraint. Your task is to assess whether the model's response satisfies the given constraint and return True or False. 
I will provide you with the constraint and the model's response under this constraint. To assist with your evaluation, I will also provide you with the model's response to the same question without the constraint." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 88, + 332, + 205, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 332, + 205, + 345 + ], + "spans": [ + { + "bbox": [ + 88, + 332, + 205, + 345 + ], + "type": "text", + "content": "Constraint: {constraint}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 88, + 346, + 352, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 346, + 352, + 358 + ], + "spans": [ + { + "bbox": [ + 88, + 346, + 352, + 358 + ], + "type": "text", + "content": "Response under the constraint: {pred_with Constraint}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 88, + 359, + 376, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 359, + 376, + 372 + ], + "spans": [ + { + "bbox": [ + 88, + 359, + 376, + 372 + ], + "type": "text", + "content": "Response without the constraint: {pred Without constraint}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 88, + 385, + 307, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 385, + 307, + 398 + ], + "spans": [ + { + "bbox": [ + 88, + 385, + 307, + 398 + ], + "type": "text", + "content": "**Please follow the steps below to evaluate**:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 88, + 399, + 525, + 491 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 88, + 399, + 515, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 399, + 515, + 438 + ], + "spans": [ + { + "bbox": [ + 88, + 399, + 515, + 438 + ], + "type": "text", + "content": "Step 1. Compare the model's response under the constraint with its response without the constraint. 
If you believe these two answers are very similar, it means the model has not fully considered the impact of the constraint on the answer. Please return False." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 88, + 439, + 525, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 439, + 525, + 491 + ], + "spans": [ + { + "bbox": [ + 88, + 439, + 525, + 491 + ], + "type": "text", + "content": "Step 2. Compare the model's response under the constraint with the content of the constraint. If you believe the model's response does not meet the requirements specified in the constraint, return False. Otherwise, if the response effectively satisfies the constraint, return True." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 88, + 492, + 472, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 492, + 472, + 518 + ], + "spans": [ + { + "bbox": [ + 88, + 492, + 472, + 518 + ], + "type": "text", + "content": "**Response Format**: Your answer should only include \"True\" or \"False\", and no additional text." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 203, + 568, + 406, + 580 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 568, + 406, + 580 + ], + "spans": [ + { + "bbox": [ + 203, + 568, + 406, + 580 + ], + "type": "text", + "content": "Figure 21. 
Prompt template for Compare Judge Method" + } + ] + } + ], + "index": 10, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 85, + 233, + 240, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 233, + 240, + 251 + ], + "spans": [ + { + "bbox": [ + 85, + 233, + 240, + 251 + ], + "type": "text", + "content": "Direct Judge Prompt" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 85, + 268, + 512, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 268, + 512, + 296 + ], + "spans": [ + { + "bbox": [ + 85, + 268, + 512, + 296 + ], + "type": "text", + "content": "Your task is to evaluate whether the response from an AI assistant adheres to all of the given constraints." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 86, + 297, + 387, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 297, + 387, + 310 + ], + "spans": [ + { + "bbox": [ + 86, + 297, + 387, + 310 + ], + "type": "text", + "content": "Please follow the requirements below to make the judgment:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 312, + 520, + 351 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 86, + 312, + 314, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 312, + 314, + 323 + ], + "spans": [ + { + "bbox": [ + 86, + 312, + 314, + 323 + ], + "type": "text", + "content": "1. Be strict and consistent in your assessment." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 86, + 325, + 408, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 325, + 408, + 338 + ], + "spans": [ + { + "bbox": [ + 86, + 325, + 408, + 338 + ], + "type": "text", + "content": "2. You should refer to the content of image to make the judgment." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 338, + 520, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 338, + 520, + 351 + ], + "spans": [ + { + "bbox": [ + 86, + 338, + 520, + 351 + ], + "type": "text", + "content": "3. For one constraint, if the response fails to fully meet the constraint, give it a score of 0." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 86, + 353, + 337, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 353, + 337, + 365 + ], + "spans": [ + { + "bbox": [ + 86, + 353, + 337, + 365 + ], + "type": "text", + "content": "Otherwise, give it a score of 1. 
" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 86, + 367, + 147, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 367, + 147, + 380 + ], + "spans": [ + { + "bbox": [ + 86, + 367, + 147, + 380 + ], + "type": "text", + "content": "{prediction}" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 86, + 381, + 186, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 381, + 186, + 393 + ], + "spans": [ + { + "bbox": [ + 86, + 381, + 186, + 393 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 86, + 395, + 214, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 395, + 214, + 406 + ], + "spans": [ + { + "bbox": [ + 86, + 395, + 214, + 406 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 86, + 408, + 170, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 408, + 170, + 421 + ], + "spans": [ + { + "bbox": [ + 86, + 408, + 170, + 421 + ], + "type": "text", + "content": "{constraints_str}" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 86, + 422, + 206, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 422, + 206, + 434 + ], + "spans": [ + { + "bbox": [ + 86, + 422, + 206, + 434 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 86, + 436, + 514, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 436, + 514, + 462 + ], + "spans": [ + { + "bbox": [ + 86, + 436, + 514, + 462 + ], + "type": "text", + "content": "You should judge and explain for each constraint in the constraint list without omitting any constraint. Finally, list scores of all the constraints in one sentence." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 86, + 464, + 302, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 464, + 302, + 475 + ], + "spans": [ + { + "bbox": [ + 86, + 464, + 302, + 475 + ], + "type": "text", + "content": "You should strictly follow the format below:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 86, + 478, + 154, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 478, + 154, + 490 + ], + "spans": [ + { + "bbox": [ + 86, + 478, + 154, + 490 + ], + "type": "text", + "content": "Judgement: ..." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 86, + 491, + 509, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 491, + 509, + 518 + ], + "spans": [ + { + "bbox": [ + 86, + 491, + 509, + 518 + ], + "type": "text", + "content": "Summary: Score of constraint_1: x/1, Score of constraint_2: x/1, Score of constraint_3: x/1, ..., Score of constraint_n: x/1." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 209, + 563, + 401, + 574 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 563, + 401, + 574 + ], + "spans": [ + { + "bbox": [ + 209, + 563, + 401, + 574 + ], + "type": "text", + "content": "Figure 22. 
Prompt template for Direct Judge Method" + } + ] + } + ], + "index": 17, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 301, + 732, + 310, + 741 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 122, + 177, + 487, + 583 + ], + "blocks": [ + { + "bbox": [ + 122, + 177, + 487, + 583 + ], + "lines": [ + { + "bbox": [ + 122, + 177, + 487, + 583 + ], + "spans": [ + { + "bbox": [ + 122, + 177, + 487, + 583 + ], + "type": "table", + "html": "
Main ClassSubclassEvaluationDescriptionExample
A. Rhetoric & LogicA.1 Rhetoric requirementsCompare JudgeConstraint that requires the response to use a specific rhetorical technique.“Your output should include a metaphor.”
A.2 Logical relationDirect JudgeConstraint that ensures logical cohesion within the response by requiring specific logical connectors or structures.“Each paragraph must contain at least one cause-and-effect relationship.”
B. Format limitB.1 Natural languageDirect JudgeConstraint specifying which natural language(s) should be used in the response.“Please answer in Spanish.”
B.2 Part of speechDirect JudgeConstraint that requires the response to use a specific part of speech.“Use at least three adjectives in your response.”
B.3 Sentence structureDirect JudgeConstraint that specifies special sentence structures to be used in the response.“Write each sentence so it includes a parenthetical phrase.”
B.4 Tense requirementsDirect JudgeConstraint that specifies the use of multiple tenses within the response.“In past tense totally.”
B.5 PunctuationRule-baseConstraint specifying unconventional yet feasible punctuation usage in the response.“Replace all periods with semicolons.”
B.6 HighlightDirect JudgeConstraint that specifies a unique but manageable method for highlighting text.“Use **bold** for every noun.”
B.7 Title requirementsDirect JudgeConstraint that specifies how titles should be added to the response.“Provide a concise title that summarizes the main idea.”
B.8 Style requirementsCompare JudgeConstraint that specifies an unconventional or distinctive writing style for the response.“Write the answer in the form of a brief detective story.”
B.9 Case requirementsDirect JudgeConstraint specifying an unusual yet readable approach to letter case in the response.“Write all nouns in UPPERCASE and all adjectives in lowercase.”
B.10 Unstrict formatDirect JudgeConstraint specifying a unique format for the output while keeping it approachable.“Format your response as a short play script with speaker labels.”
B.11 Strict formatDirect JudgeConstraint that requires the response to follow a strictly defined format.“Please provide the output as well-formed XML with custom tags.”
B.12 Number and ListDirect JudgeConstraint for using numbered or bulleted lists in the response.“Present all key points as a numbered list with bulleted sub-lists.”
B.13 Wrap upDirect JudgeConstraint that requires a concise, well-structured summary or conclusion.“Provide a final paragraph summarizing the key arguments.”
B.14 First letterDirect JudgeConstraint specifying a pattern for the first letters of sentences or paragraphs.“Each sentence should begin with a letter that progresses through the alphabet.”
C. Text Length limitC.1 Paragraph limitRule-baseConstraint that specifies the number of paragraphs in the response.“Your response must consist of exactly 4 paragraphs.”
C.2 Sentence limitRule-baseConstraint that specifies the number of sentences in each paragraph.“Totally use 5 sentences in your response.”
C.3 Word limitRule-baseConstraint that specifies a small range for the total number of words in the text.“Your response must be a single word or phrase.”
D. Math limitD.1 PrecisionRule-baseConstraint that specifies the level of precision required in mathematical calculations.“Keep two decimal places for all numbers in the answer.”
D.2 Scientific notationRule-baseConstraint that requires the use of scientific notation for large or small numbers.“Express all numbers greater than 1,000 in scientific notation.”
E. Action limitE.1 Role imitationCompare JudgeConstraint requiring the response to imitate the tone and style of a specific role or public figure.“Please answer in the style of a sports commentator.”
E.2 Prefix and SuffixRule-baseConstraint that requires the response to begin or end with a specific phrase or symbol.“Please start your answer with ‘Once upon a time...’”
E.3 Tone requirementCompare JudgeConstraint specifying an emotional tone for the response.“Write your answer in a positive and encouraging tone.”
E.4 PerspectiveDirect JudgeConstraint that specifies a narrative perspective for the response.“Write your answer in the first-person singular as a personal account.”
E.5 Target audienceCompare JudgeConstraint requiring the response to be tailored for a specific audience.“Craft your response as if explaining to high school students.”
E.6 SituationCompare JudgeConstraint requiring the response to be set in a specific situation or scenario.“Answer as if you are giving safety instructions before a flight.”
E.7 Prior conditionDirect JudgeConstraint stating that when a specific condition is met, the response must follow a particular process.“If the user requests legal advice, begin with a disclaimer.”
F. KeywordF.1 MentionRule-base & Direct JudgeConstraint that requires including a specific keyword a certain number of times.“Mention ‘GreenTech’ exactly three times throughout.”
F.2 Not mentionRule-base & Direct JudgeConstraint that requires avoiding specific keywords or phrases.“Do not mention the words ‘budget’ or ‘investment’.”
F.3 Multiple mentionRule-base & Direct JudgeConstraint requiring including multiple specified keywords in a balanced manner.“Mention both ‘sustainability’ and ‘renewable energy’ at least twice.”
F.4 Keyword variationDirect JudgeConstraint requiring the use of synonyms or variations of a given keyword.“Use at least three synonyms for ‘innovation’ throughout your text.”
", + "image_path": "9869dc9133a474107a7d86c90dc56f607a78d7c877ecf631366616bdfd7852f9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 175, + 592, + 434, + 602 + ], + "lines": [ + { + "bbox": [ + 175, + 592, + 434, + 602 + ], + "spans": [ + { + "bbox": [ + 175, + 592, + 434, + 602 + ], + "type": "text", + "content": "Table 5. Constraint Categories and Evaluation Methods for MM-IFEval" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 301, + 732, + 310, + 741 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 133, + 256, + 477, + 504 + ], + "blocks": [ + { + "bbox": [ + 133, + 256, + 477, + 504 + ], + "lines": [ + { + "bbox": [ + 133, + 256, + 477, + 504 + ], + "spans": [ + { + "bbox": [ + 133, + 256, + 477, + 504 + ], + "type": "table", + "html": "
CategoryInstruction
Descriptive AnalysisDescribe the animal's typical habitat, diet, and one unique behavioral trait.
Provide a detailed analysis of the image, including the setting, characters, and notable objects.
Explain the activity taking place in the image.
Describe the activities of the person on the left in the image.
Emotional & PerspectiveWhat emotions do you think the person in this image might be feeling?
Imagine you are the person on the left in the scene depicted in this image, write a story about what you would do next.
Personify the sign in the image and express its feelings about the rule it presents.
Creative WritingCreate a short conversation between any two individuals in the scene.
Pretend this snapshot belongs to a larger story. Write a quick paragraph setting up the next plot twist.
Use this picture as your muse. Craft a brief poem—any style—that captures the emotion you sense.
Turn this scene into a short children's story focusing on wonder and curiosity.
Write a short poem with two stanzas, inspired by the emotion or content depicted in this image.
Social Media & ContentAssume this is an image you are about to post on Twitter. Please provide a short, upbeat caption describing it.
Assume you are creating a Pinterest pin with this image. Write a short inspirational or motivational caption to accompany it.
If this image were promoting an upcoming event, compose a quick announcement with the date, a highlight of what to expect, and a call-to-action.
Role PlayImagine you are the photographer who took this picture. Briefly explain why you chose to capture this particular moment and what story you hope it conveys.
", + "image_path": "a0eaad4b1104e3743ad2ff61115c055cb525b491b47d3414f47134eaa66179b5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 236, + 514, + 373, + 524 + ], + "lines": [ + { + "bbox": [ + 236, + 514, + 373, + 524 + ], + "spans": [ + { + "bbox": [ + 236, + 514, + 373, + 524 + ], + "type": "text", + "content": "Table 6. Task Pool for MM-IFEngine" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 101, + 178, + 509, + 581 + ], + "blocks": [ + { + "bbox": [ + 101, + 178, + 509, + 581 + ], + "lines": [ + { + "bbox": [ + 101, + 178, + 509, + 581 + ], + "spans": [ + { + "bbox": [ + 101, + 178, + 509, + 581 + ], + "type": "table", + "html": "
Verified Function NameFunction ParametersConstraint ExampleParameter Example
check Whether\\_responseParagraph\\_number_in_rangelower_bound:int,upper_bound:intThe number of text paragraphs be at least 3[3, 10000]
check Whether\\_response\\_sentence\\_number_in_rangelower_bound:int,upper_bound:intThe number of sentences be exactly 3[3, 3]
check Whether\\_each\\_paragraph\\_sentence\\_number_in_rangelower_bound:int,upper_bound:intThe number of sentences in each paragraph be less than 3[0, 2]
check Whether\\_each\\_paragraph\\_sentence\\_number_in_range_listranges:List[tuple]The number of sentences in the first paragraph be exactly 3, and in the second paragraph be at most 2[(3, 3), (1, 2)]
check Whether\\_each\\_paragraph\\_sentence\\_number_exceedsexceed_num:int,upper_bound:intEach new paragraph should have 1 sentence more than the previous one, no paragraph exceeds 7 sentences[1, 7]
check Whether\\_response_word_count_in_rangelower_bound:int,upper_bound:intThe number of words should be between 50 and 80[50, 80]
check Whether\\_each\\_paragraph\\_word_count_in_rangelower_bound:int,upper_bound:intThe number of words in each paragraph should be between 50 and 80[50, 80]
check Whether\\_each\\_paragraph\\_word_count_in_range_listranges:List[tuple]The number of words in the first paragraph be between 20 and 30, in the second between 50 and 80[(20, 30), (50, 80)]
check Whether\\_whole\\_response_notContain_certain_substringsubstring:strThe response should not contain the word "apple"["apple"]
check Whether\\_whole\\_response_notContain_certain_substringssubstrings:List[str]The response should not contain the words "apple" and "banana"[["apple", "banana"]]
check Whether\\_each\\_sentence_begin_with_certain_substringsubstring:strEach sentence should start with exclamation point["!"]
check Whether\\_each\\_sentence_end_with_certain_substringsubstring:strEach sentence should end with "apple"["apple"]}
check Whether\\_whole\\_response_begin_with_certain_substringsubstring:strThe response should start with "apple"["apple"]}
check Whether\\_whole\\_response_end_with_certain_substringsubstring:strThe response should end with "apple"["apple"]}
check Whether\\_keywords_metioned_in_rangekeywords:List[str], lower_bound(times:int, upper_bound(times):intThe response should mention the word "apple" at least 3 times[["apple"], 3, 10000]
check_number_precision_in_responseprecision:intThe numbers in the response should have 2 decimal places[2]
check Whether has no\\_number_in_response-The response should not contain any number[]
check Scientific_notation\\_precision_in_responsesignificantDigits:intThe numbers in the response should have 3 significant digits[3]
", + "image_path": "746967d3754041f1cfef9cc64a9093ae1f7ea58de21ff7208c444192c75c151d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 160, + 590, + 449, + 599 + ], + "lines": [ + { + "bbox": [ + 160, + 590, + 449, + 599 + ], + "spans": [ + { + "bbox": [ + 160, + 590, + 449, + 599 + ], + "type": "text", + "content": "Table 7. Verification Functions for rule-based evaluation method in MM-IFEval" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 301, + 732, + 310, + 742 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_content_list.json b/data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..19b1630c86cb1494a8f5a723ccce6141d493552e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_content_list.json @@ -0,0 +1,2599 @@ +[ + { + "type": "text", + "text": "Detect Anything 3D in the Wild", + "text_level": 1, + "bbox": [ + 338, + 130, + 658, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hanxue Zhang $^{1,2*}$ , Haoran Jiang $^{1,3*}$ , Qingsong Yao $^{4*}$ , Yanan Sun $^{1}$ , Renrui Zhang $^{5}$ , Hao Zhao $^{6}$ , Hongyang Li $^{1}$ , Hongzi Zhu $^{2}$ , Zetong Yang $^{1,7}$", + "bbox": [ + 174, + 179, + 823, + 218 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 OpenDriveLab at Shanghai AI Laboratory 2 Shanghai Jiao Tong University 3 Fudan University 4 Stanford 
University 5 CUHK MMLab 6 Tsinghua University 7 GAC R&D Center", + "bbox": [ + 101, + 222, + 898, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://github.com/OpenDriveLab/DetAny3D", + "bbox": [ + 300, + 263, + 697, + 280 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/299257f0337a99f363ce4d49708330f368be601ba092797380509dd2b32250d9.jpg", + "image_caption": [ + "Figure 1. Introducing DetAny3D, a promptable 3D detection foundation model capable of detecting any 3D object with arbitrary monocular images in diverse scenes. Our framework enables multi-prompt interaction (e.g., box, point, and text) to deliver open-world 3D detection results ( $w \\times h \\times l$ in centimeter) for novel objects across various domains. It achieves significant zero-shot generalization, outperforming SOTA by up to 21.02 and 5.68 AP3D on novel categories and novel datasets with new camera configurations." + ], + "image_footnote": [], + "bbox": [ + 93, + 305, + 903, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 731, + 326, + 747 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite the success of deep learning in close-set 3D object detection, existing approaches struggle with zero-shot generalization to novel objects and camera configurations. We introduce DetAny3D, a promptable 3D detection foundation model capable of detecting any novel object under arbitrary camera configurations using only monocular inputs. Training a foundation model for 3D detection is fun", + "bbox": [ + 88, + 766, + 482, + 872 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "damentally constrained by the limited availability of annotated 3D data, which motivates DetAny3D to leverage the rich prior knowledge embedded in extensively pre-trained 2D foundation models to compensate for this scarcity. 
To effectively transfer 2D knowledge to 3D, DetAny3D incorporates two core modules: the 2D Aggregator, which aligns features from different 2D foundation models, and the 3D Interpreter with Zero-Embedding Mapping, which stabilizes early training in 2D-to-3D knowledge transfer. Experimental results validate the strong generalization of our DetAny3D, which not only achieves state-of-the-art performance on unseen categories and novel camera configura", + "bbox": [ + 511, + 718, + 906, + 900 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07958v3 [cs.CV] 2 Dec 2025", + "bbox": [ + 22, + 282, + 57, + 713 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 114, + 887, + 225, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tions, but also surpasses most competitors on in-domain data. DetAny3D sheds light on the potential of the 3D foundation model for diverse applications in real-world scenarios, e.g., rare object detection in autonomous driving, and demonstrates promise for further exploration of 3D-centric tasks in open-world settings. More visualization results can be found at our code repository.", + "bbox": [ + 89, + 90, + 480, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 224, + 222, + 242 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D object detection is a fundamental technology for autonomous systems [12, 14, 15, 36, 48, 49], robotics [6, 67, 84], and augmented reality [43, 52]. 3D perception not only enables machines to perceive and interact with the physical world, but also serves as a foundational input for more advanced tasks, such as behavior decision [3, 11, 20, 31], world modeling [22, 23, 38] and 3D scene reconstruction [50, 73, 75]. 
For practical deployment, a generalizable 3D detector ideally should detect arbitrary objects from easily accessible inputs, such as monocular images, without relying on specific sensor parameters. Such a model would be highly adaptable and reliable for various downstream tasks in diverse and unpredictable environments [15, 36, 43, 84]. Also, accurate detection results provided by such a detector (e.g., generating 3D bounding boxes for even images from the internet) make it a versatile tool, paving the way for scalable 3D systems that leverage Internet-scale data and advance toward open-world scenarios [22, 23, 38, 50, 73].", + "bbox": [ + 89, + 250, + 482, + 522 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Previous research, exemplified by Omni3D [8], has attempted to improve the generalization of the 3D detection system through multi-dataset training [8, 35, 40, 68]. However, despite utilizing large datasets to train a unified detector [8, 40], these approaches provide limited generalization to novel camera configurations and cannot detect unseen object categories beyond predefined label spaces. Therefore, developing a 3D detection foundation model with strong zero-shot generalizability, which is capable of detecting any unseen object under arbitrary camera configurations, remains a crucial and unsolved problem.", + "bbox": [ + 88, + 523, + 482, + 688 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While recent advances in 2D foundation models [33, 44, 51, 56] demonstrate remarkable zero-shot capabilities. Segment Anything Model (SAM) [33] features a promptable inference mechanism, supporting user-friendly prompts like points and boxes to segment user-specified objects. Their impressive generalization ability stems from training on billions of annotated images. However, in 3D object detection, the available labeled data is limited to only millions of samples—typically 3-4 orders of magnitude smaller than in 2D images. 
Such severe data scarcity [74, 86] poses a fundamental challenge, making it nearly infeasible to train a 3D foundation model from scratch.", + "bbox": [ + 89, + 688, + 482, + 868 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we present DetAny3D, a promptable 3D detection foundation model designed for generalizable 3D", + "bbox": [ + 89, + 869, + 482, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "object detection using only monocular images (see Figure 1). Given the inherent scarcity of 3D annotated data, we achieve strong generalization from two critical perspectives: model architecture and data utilization. The central insight of our approach is to leverage the extensive prior knowledge encoded within two broadly pre-trained 2D foundation models—SAM [33] and DINO [10, 51)—thus unlocking effective zero-shot 3D detection capabilities with minimal available 3D data.", + "bbox": [ + 511, + 90, + 903, + 226 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Specifically, we adopt SAM as our promptable backbone, capitalizing on its versatile and robust object understanding capability derived from large-scale 2D data. Concurrently, we utilize DINO [51] depth-pretrained by UniDepth [54], to offer redundant 3D geometric priors [7, 76], which plays a pivotal role for accurate 3D detection in a monocular setting. To integrate the complementary features from SAM and DINO more effectively, we propose the 2D Aggregator, an attention-based mechanism that aligns these features and dynamically optimizes their contributions via learnable gating. 2D Aggregator fully exploits the strengths of each foundation model.", + "bbox": [ + 511, + 229, + 903, + 409 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To further address the challenge of effectively transferring knowledge from 2D to 3D, we introduce the 3D Interpreter. 
Central to the 3D Interpreter is the Zero-Embedding Mapping (ZEM) mechanism, which ensures stable 2D-to-3D mapping by reducing early-stage interference and preserving pretrained 2D priors. By stabilizing the training process across diverse datasets with varying camera parameters, scene complexities, and depth distributions, the ZEM mechanism enables progressive zero-shot 3D grounding capabilities, significantly enhancing model generalization.", + "bbox": [ + 511, + 412, + 903, + 564 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To leverage as much 3D-related data as possible, we aggregate a diverse range of datasets, including 16 datasets spanning depth with intrinsic data and 3D detection data, referred as DA3D. Experimental results, using prompts aligned with the baselines, demonstrate three key advantages: (1) Generalization to novel classes: achieves $21.0\\%$ , $4.3\\%$ , $11.3\\%$ higher zero-shot $\\mathrm{AP}_{3\\mathrm{D}}$ than baselines on novel categories on KITTI, SUNRGBD, and ARKitScenes. (2) Generalization to novel cameras: improves cross-dataset performance by $4.7\\%$ , $5.7\\%$ and $1.1\\%$ $\\mathrm{AP}_{3\\mathrm{D}}$ compared to baseline methods on zero-shot datasets Cityscapes3D, Waymo and 3RScan. (3) Performance on in-domain data: surpasses baseline by $1.6\\%$ $\\mathrm{AP}_{3\\mathrm{D}}$ on Omni3D. 
Core contributions are summarized in the following:
3D Object Detection", + "text_level": 1, + "bbox": [ + 89, + 268, + 282, + 284 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Existing 3D object detection systems have predominantly focused on single-dataset optimization, achieving strong performance on benchmark datasets like KITTI [24] and nuScenes [9] through task-specific architectures [14, 18, 39, 41, 42, 45, 66, 80]. While effective in constrained scenarios, these approaches suffer from significant domain gaps when deployed in new contexts, primarily due to their reliance on limited sensor-specific data and closed-set assumptions. Recent works, exemplified by Omni3D [8], have demonstrated the potential of multi-dataset training. Models like Cube R-CNN [8] and UniMODE [40] train a universal monocular 3D detector across multiple datasets, achieving some level of robustness to camera parameters, but are still restricted to predefined classes. V-MIND [32] further addresses the data scarcity challenge by generating pseudo 3D training data from large-scale 2D annotations. Towards more general detection, OV-Uni3DETR [69] pioneers openset detection that is able to detect with multimodal inputs, but it is trained separately for indoor and outdoor domains, thereby limiting its overall generalization. More recently, OVMono3D [74] leverages Grounding DINO's [44] 2D results with a 3D head on unified datasets. However, it does not fully exploit the priors contained in 2D foundation models, leading to performance constraints tied to the limited 3D data. In contrast, our approach fully capitalizes on the knowledge distilled in 2D foundation models while leveraging abundant 3D-related data, thereby enabling the detection of any 3D object from arbitrary monocular inputs.", + "bbox": [ + 89, + 291, + 483, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. 
Vision Foundation Models", + "text_level": 1, + "bbox": [ + 89, + 726, + 328, + 742 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Foundation models have demonstrated significant potential across various domains. For example, language foundation models such as GPT-4 [1] and DeepSeek [5, 26], trained on massive internet-scale corpora, have achieved impressive capabilities in natural language processing across diverse fields [1, 5, 60, 63, 81, 82]. Similarly, foundation models in the vision domain have made remarkable strides [29, 33, 37, 44, 51, 56, 79]. DINOv2 [51], trained on a vast range of curated data from diverse sources, is capable of producing general-purpose visual features that work", + "bbox": [ + 89, + 750, + 482, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "seamlessly across different image distributions and tasks. SAM [33] has taken a step further in the vision domain by introducing promptability, enabling models to generalize to novel visual concepts through large-scale data training and continuous model refinement. In recent years, the development of foundation models in the 3D domain has started to take initial steps [13, 28, 55, 78, 83, 85]. Most existing 3D foundation models are often combined with vision-language models (VLMs) [13, 27, 55, 85], relying on point clouds as input to help the language models understand 3D [13, 85]. While these methods are valuable for scene understanding and semantic tasks, they do not directly provide precise 3D detection results. Moreover, point cloud inputs significantly restrict the use cases [72], as they are not always accessible in many practical scenarios. In contrast to these approaches, we aim to develop a foundation model specifically dedicated to 3D detection tasks with the most general inputs, monocular images. 
By leveraging the powerful priors from 2D vision foundation models, our approach enables the detection of any 3D object with arbitrary camera configurations, presenting a broad range of practical applications.", + "bbox": [ + 511, + 90, + 906, + 424 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Detect Anything 3D in the Wild", + "text_level": 1, + "bbox": [ + 511, + 440, + 803, + 458 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview", + "text_level": 1, + "bbox": [ + 511, + 467, + 624, + 482 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As illustrated in Figure 2(a), DetAny3D takes a monocular RGB image and prompts (e.g., boxes, points, text, intrinsic) as input. The box, point, and text prompts are used to specify objects, while the intrinsic prompts are optional. When not provided, the model predicts the intrinsic parameters and the corresponding 3D detection results. If intrinsic are available, the model can leverage them as geometric constraints to mitigate the ill-posed nature of monocular depth estimation and calibrate its detection results.", + "bbox": [ + 511, + 489, + 906, + 625 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Specifically, the monocular image is embedded in parallel by two foundational models: SAM [33] for low-level pixel information, underpins the entire promptable architecture. And depth-pretrained DINO [51, 54], which provide rich high-level geometric knowledge, excels in depth-related tasks. These complementary 2D features are then fused through our proposed 2D Aggregator (see Figure 2(b)), which hierarchically aligns low-level and high-level information using cross-attention layers. 
The fused features are subsequently passed to the Depth/Camera Module, which extracts the camera and camera-aware depth embedding, collectively referred to as geometric embedding.", + "bbox": [ + 511, + 628, + 908, + 808 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The geometric embedding and the 3D bounding box tokens with encoded prompt tokens are then fed into the 3D Interpreter (see Figure 2(c)), which employs a structure similar to the SAM decoder along with a specialized Zero-Embedding Mapping (ZEM) mechanism. 3D Interpreter injects 3D geometric features while ensuring stable 2D-to", + "bbox": [ + 511, + 810, + 908, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/91f911c538bbec2983b29ce2fd15f511826695ab2d50c383e182329360bb8173.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 96, + 90, + 890, + 244 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4568c2ee3c1179d81c0e12e63e2d6ab2b294a4bad4d770fcf97dfd8956d5356c.jpg", + "image_caption": [ + "Figure 2. Overview of DetAny3D. It supports arbitrary monocular images as input and performs 3D object detection driven by prompts—box, point, and text to specify target objects and optional camera calibration to calibrate geometric projections. DetAny3D comprises two key modules: (b) 2D Aggregator, which employs a hierarchical cross-attention mechanism to dynamically fuse knowledge from SAM and DINO, with a learnable gate controlling each component's contribution to the geometric embedding; (c) 3D Interpreter, which introduces a Zero-Embedding Mapping (ZEM) strategy based on zero-initialized layers to gradually inject geometric priors, thereby enables zero-shot 3D grounding and avoids catastrophic forgetting during knowledge transfer." 
+ ], + "image_footnote": [], + "bbox": [ + 96, + 250, + 890, + 377 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D knowledge transfer, enabling progressive 3D grounding across diverse data domains. Finally, the model predicts 3D boxes based on the hidden states of the 3D box tokens. Our DetAny3D is trained on selected seen classes and can detect any unseen classes in a zero-shot manner.", + "bbox": [ + 89, + 483, + 483, + 559 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2D Aggregator", + "text_level": 1, + "bbox": [ + 89, + 571, + 241, + 588 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To effectively fuse multiple foundation models, we propose 2D Aggregator to aggregate features from SAM and DINO, mitigating potential conflicts between their heterogeneous representations. As illustrated in Figure 2(b), the 2D Aggregator fuses features from SAM and DINO in a hierarchical manner, progressively integrating spatial and geometric information across four cascaded alignment units.", + "bbox": [ + 89, + 595, + 482, + 700 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Feature Extraction. Given an input image, the SAM encoder extracts high-resolution spatial features $\\mathbf{F}_s\\in$ $\\mathbb{R}^{H_s\\times W_s\\times C}$ , capturing fine-grained details and boundaries. Simultaneously, the DINO encoder outputs geometry-aware embeddings $\\mathbf{F}_d\\in \\mathbb{R}^{H_d\\times W_d\\times C}$ , which is depth-pretrained by UniDepth [54] and provides robust priors for depth and intrinsics. Following the design of ViT Adapter [16], we also employ a convolutional structure to produce preliminary image features, denoted as $\\mathbf{F}_q^0$ , serving as the initial query for subsequent attention-based fusion.", + "bbox": [ + 89, + 702, + 482, + 853 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Hierarchical Fusion. Each of the four alignment units fuses SAM and DINO features via cross-attention. 
In the $i$ -th unit, we first apply learnable gating weights $\\alpha_{i}$ (initial", + "bbox": [ + 89, + 854, + 483, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ized to 0.5) to combine the $i$ -th block of SAM features $\\mathbf{F}_s^i$ and DINO features $\\mathbf{F}_d^i$ as follows:", + "bbox": [ + 511, + 482, + 903, + 513 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {\\text {f u s e d}} ^ {i} = \\alpha_ {i} \\cdot \\mathbf {F} _ {s} ^ {i} + (1 - \\alpha_ {i}) \\cdot \\mathbf {F} _ {d} ^ {i}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 523, + 903, + 541 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We use $\\mathbf{F}_{\\mathrm{fused}}^{i}$ as key and value, while the query feature $\\mathbf{F}_q^{i - 1}$ acts as the query in the cross-attention mechanism:", + "bbox": [ + 511, + 549, + 903, + 580 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {q} ^ {i} = \\operatorname {C r o s s A t t n} \\left(\\mathbf {F} _ {q} ^ {i - 1}, \\mathbf {F} _ {\\text {f u s e d}} ^ {i}, \\mathbf {F} _ {\\text {f u s e d}} ^ {i}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 570, + 590, + 903, + 609 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {F}} _ {\\text {f u s e d}} ^ {i} = \\operatorname {N o r m} \\left(\\mathbf {F} _ {\\text {f u s e d}} ^ {i} + \\mathbf {F} _ {q} ^ {i}\\right). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 619, + 903, + 640 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This design enables the model to dynamically emphasize SAM's spatial details or DINO's semantic and geometric cues at different hierarchy levels while minimizing interference between the two representations.", + "bbox": [ + 511, + 643, + 906, + 703 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Geometric Embeddings. 
The fused features $\\hat{\\mathbf{F}}_{\\mathrm{fused}}^i$ , $i \\in [1,2,3,4]$ , are subsequently processed by the depth and camera modules, following the UniDepth [54] architecture. Specifically, these modules predict the camera embedding $\\mathbf{C}$ and camera-aware depth embedding $\\mathbf{D}|\\mathbf{C}$ , referred as the geometric embedding $\\mathbf{G} = \\{\\mathbf{D}|\\mathbf{C}, \\mathbf{C}\\}$ . These modules provide aligned depth and camera parameters under the monocular depth ill-posed problem. Further details can be found in the Supplementary material Section 7.1.", + "bbox": [ + 511, + 703, + 906, + 839 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Overall, by progressively aligning multi-scale features and adaptively integrating their contributions, 2D Aggregator effectively leverages the strengths of both foundation models while minimizing potential conflicts.", + "bbox": [ + 511, + 840, + 905, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. 3D Interpreter", + "text_level": 1, + "bbox": [ + 91, + 90, + 240, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The diverse 3D object supervisions across various scenarios, depths, and camera intrinsics introduce challenges to model training. Our 3D Interpreter aims to progressively integrate geometric information while ensuring stable 2D-to-3D knowledge transfer. We introduce Zero-Embedding Mapping (ZEM) mechanism, which incrementally infuses 3D geometry into the decoder via zero-initialized layers—without disrupting the original 2D features. As Figure 2(c) shows, the 3D Interpreter comprises three main components: the Two-Way Transformer, the Geometric Transformer, and the 3D bounding box heads.", + "bbox": [ + 89, + 112, + 483, + 279 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Two-Way Transformer. 
Following the SAM design, we first concatenate the 3D bounding box tokens with prompt-related tokens to form the query:", + "bbox": [ + 89, + 280, + 483, + 325 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} = \\left[ \\left[ \\mathbf {T} _ {\\mathrm {3 D}, 1}; \\mathbf {T} _ {\\mathrm {p}, 1} \\right], \\dots , \\left[ \\mathbf {T} _ {\\mathrm {3 D}, N}; \\mathbf {T} _ {\\mathrm {p}, N} \\right] \\right], \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 137, + 337, + 482, + 364 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{T}_{3\\mathrm{D},i}$ denotes the 3D bounding box token for the $i$ -th object, $\\mathbf{T}_{\\mathrm{p},i}$ is the prompt-related token, and $[\\cdot ;\\cdot ]$ denotes vector concatenation. The SAM encoder output $\\mathbf{F}_s$ serves as both key and value for the first Two-Way Transformer layer, yielding:", + "bbox": [ + 89, + 375, + 483, + 450 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {s} ^ {\\prime} = \\text {T w o W a y T r a n s} (\\mathbf {Q}, \\mathbf {F} _ {s}, \\mathbf {F} _ {s}). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 464, + 482, + 481 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The initialized parameters of two-way transformer are copied using pre-trained SAM decoder.", + "bbox": [ + 89, + 494, + 482, + 523 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Geometric Transformer. 
We then process the geometric embedding $\\mathbf{G}$ (from the 2D Aggregator) through the zero-initialized $1 \\times 1$ convolutional layer ZEM and add it to $\\mathbf{F}_s$ for use as key and value in the Geometric Transformer:", + "bbox": [ + 89, + 525, + 483, + 585 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {G} ^ {\\prime} = \\operatorname {G e o T r a n s} (\\mathbf {Q}, \\operatorname {Z E M} (\\mathbf {G}) + \\mathbf {F} _ {s}, \\operatorname {Z E M} (\\mathbf {G}) + \\mathbf {F} _ {s}). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 598, + 482, + 616 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ZEM integrates the geometric embedding and avoids catastrophic forgetting in 2D features. Next, $\\mathbf{G}'$ is again passed through ZEM and combined with $\\mathbf{F}_s'$ . This enriched representation is used as key and value in the second Two-Way Transformer layer to generate object features $\\mathbf{O}$ :", + "bbox": [ + 89, + 628, + 483, + 704 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {O} = \\text {T w o W a y T r a n s} \\left(\\mathbf {Q} ^ {\\prime}, \\operatorname {Z E M} \\left(\\mathbf {G} ^ {\\prime}\\right) + \\mathbf {F} _ {s} ^ {\\prime}, \\operatorname {Z E M} \\left(\\mathbf {G} ^ {\\prime}\\right) + \\mathbf {F} _ {s} ^ {\\prime}\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 717, + 483, + 747 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ZEM also helps stabilize parameter updates in the two-way and geometric transformer training, preventing conflicts arising from diverse 3D object supervision.", + "bbox": [ + 89, + 750, + 482, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3D Bounding Box Heads. 
Finally, $\\mathbf{O}$ is fed into the 3D bounding box heads to calculate the final predictions, which follows typical architectures from standard 3D detection frameworks [8, 66, 80]: $B_{\\mathrm{3D}}(x,y,z,w,h,l,R,S)$ where $x,y,z$ specify the 3D box center, $w,h,l$ are its dimensions, $R$ is the rotation matrix, and $S$ is the predicted 3D Intersection over Union (IoU) score.", + "bbox": [ + 89, + 795, + 483, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Loss", + "text_level": 1, + "bbox": [ + 513, + 90, + 586, + 104 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our loss function comprises three components, the depth loss $\\mathcal{L}_{\\mathrm{depth}}$ , the camera intrinsic loss $\\mathcal{L}_{\\mathrm{cam}}$ , and the detection loss $\\mathcal{L}_{\\mathrm{det}}$ . The overall loss is defined as the sum of these three components. For depth loss $\\mathcal{L}_{\\mathrm{depth}}$ , we adopt the commonly used SILog loss [19, 64] to supervise depth prediction. For camera intrinsic loss $\\mathcal{L}_{\\mathrm{cam}}$ , we follow the dense camera ray approach [30, 54] to represent intrinsics and also employ the SILog loss to measure deviations between predicted and ground-truth parameters. At last, for detection loss $\\mathcal{L}_{\\mathrm{det}}$ , we use the smooth L1 loss [40, 66, 80] to regress 3D bounding boxes parameters and predicted IOU scores and the Chamfer loss [8, 74] for rotation matrices. Detailed formulations of these loss functions can be found in the supplementary material Section 7.3.", + "bbox": [ + 511, + 112, + 906, + 325 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Prompt Interaction", + "text_level": 1, + "bbox": [ + 511, + 335, + 699, + 351 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "DetAny3D supports point, box, and text prompts to detect 3D box for user-specified objects. 
To calibrate more precise depth for specific camera, DetAny3D allows users to specify the camera configuration via the intrinsic prompt.", + "bbox": [ + 511, + 358, + 905, + 419 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Box and Point Prompts. Following SAM's methodology, both box and point prompts are encoded based on their respective positions and embeddings. For the box prompt, two points (top-left and bottom-right corners) are used. The point prompt is derived by combining the positional encoding of the point and the corresponding embedding.", + "bbox": [ + 511, + 419, + 903, + 508 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Text Prompts. Recent 2D foundation models like Grounding DINO [44] are able to detect bounding box for the open-vocabulary object specified by users using text prompt. DetAny3D can further generate 3D bounding box using the prediction of Grounding DINO, which enables text as prompts in the zero-shot interface.", + "bbox": [ + 511, + 510, + 905, + 599 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Intrinsic Prompts. Unlike most existing 3D detectors that employ a fixed virtual camera and rely on GT intrinsics to recover the true depth, inspired by Unidepth, we predict intrinsics for camera-aware 3D detection. When no intrinsic prompt is given, the model infers intrinsics for outputs:", + "bbox": [ + 511, + 601, + 905, + 676 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {B o x} _ {3 D} = 3 \\text {D I n t e r p r e t o r} (\\mathbf {Q}, \\hat {\\mathbf {G}}, \\mathbf {F} _ {s}), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 565, + 689, + 903, + 708 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\hat{\\mathbf{G}} = \\{\\mathbf{D}|\\hat{\\mathbf{C}},\\hat{\\mathbf{C}}\\}$ , $\\hat{\\mathbf{C}}$ is the predicted camera embedding, and $\\mathbf{D}|\\hat{\\mathbf{C}}$ is the depth embedding conditioned on the predicted camera embedding. 
When intrinsic prompts are given, the model refines the 3D detection results based on the true intrinsic:", + "bbox": [ + 511, + 720, + 905, + 796 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {B o x} _ {3 D} = 3 \\mathrm {D I n t e r p r e t o r} (\\mathbf {Q}, \\mathbf {G}, \\mathbf {F} _ {s}), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 565, + 810, + 903, + 828 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{G} = \\{\\mathbf{D}|\\mathbf{C},\\mathbf{C}\\}$ . This boosts performance on both intrinsic prediction and 3D detection since the model continuously predicts and aligns the intrinsic with the 3D detection rather than estimating it solely from input image.", + "bbox": [ + 511, + 839, + 905, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 89, + 90, + 215, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 89, + 114, + 279, + 132 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "DA3D Benchmark. We present DA3D, a unified 3D detection dataset that aggregates 16 diverse datasets for 3D detection and depth estimation. Building upon Omni3D's original datasets (Hypersim [57], ARKitScenes [4], Objectron [2], SUNRGBD [61], KITTI [24], and nuScenes [9]), we incorporate additional four outdoor detection datasets (Argoverse2 [70], A2D2 [25], Waymo [62], Cityscapes3D [21]), one indoor detection dataset (3RScan [65]), and five depth and intrinsic datasets (Scannet [17], Taskonomy [77], DrivingStereo [71], Middlebury [59], IBIMS-1 [34]). All data is standardized with monocular images, camera intrinsics, 3D bounding boxes, and depth maps. 
Following prior work [74], we select partial categories from KITTI, SUNRGBD, and ARKitScenes as zero-shot test classes. We select Cityscapes3D, Waymo, and 3RScan as our zero-shot datasets with novel camera configurations, where 3RScan also contains novel object categories. Depth supervision from LiDAR, RGB-D, and stereo sensors enhances $75\\%$ of training samples, while intrinsic parameters cover 20 camera configurations across 0.4 million frames $(2.5\\times$ Omni3D's scale). Dataset statistics and splits are detailed in Supplementary material Section 6. All data are subject to their respective licenses.", + "bbox": [ + 89, + 138, + 485, + 486 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We choose Cube R-CNN [8] and OV-Mono3D [74] as our primary baselines, as their settings align most closely with our experimental protocol: Cube R-CNN is a benchmark provided by the Omni3D dataset. It is a unified detector capable of performing detection on predefined categories. OVMono3D is a recently available open-vocabulary 3D detector on the Omni3D dataset. It lifts 2D detection to 3D by connecting the open-vocabulary 2D detector Grounding DINO [44] with a detection head.", + "bbox": [ + 89, + 488, + 483, + 625 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. We adopt the metrics in the Omni3D benchmark [8], which is Average Precision (AP). Predictions are matched to ground-truth by measuring their overlap using IoU3D, which computes the intersection-over-union (IoU) of 3D cuboids. The IoU3D thresholds range from $\\tau \\in [0.05, 0.10, \\dots, 0.50]$ . For experiments using text prompts, we additionally employ target-aware metrics from OVMono3D [74]: Prompt the detector only with category names present in the per-image annotations instead of providing an exhaustive category list. This addresses severe naming ambiguity (e.g., \"trash can\" vs. 
\"rubbish bin\") and missing annotation issues prevalent in indoor datasets like 3RScan (see Supplementary material Section 8.).", + "bbox": [ + 89, + 627, + 485, + 823 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Details. We implement DetAny3D via PyTorch [53]. We use the pretrained ViT-L DINOv2 [51, 54] and ViT-H SAM [33] as our initial models, with SAM serving as the promptable backbone, where the encoder is frozen during training. All main experiments are conducted", + "bbox": [ + 89, + 825, + 483, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "using 8 NVIDIA A100 machines with 8 GPUs for each and a batch size of 64. The model is trained for 80 epochs, taking approximately 2 weeks to complete. The training uses the AdamW [47] optimizer with an initial learning rate of 0.0001, adjusted according to the cosine annealing policy [46]. During box prompt training, we apply a 0.1 positional offset disturbance. For point prompt training, points are randomly selected from the mask. Text prompts are converted into box prompts via Grounding DINO SwinT [44]. For fair comparisons, all baseline-related experiments incorporate intrinsic prompts and use aligned prompt inputs.", + "bbox": [ + 511, + 90, + 906, + 258 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Main Results", + "text_level": 1, + "bbox": [ + 511, + 271, + 651, + 286 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Zero-shot Category Performance. In this experiment, we use two sources for the prompt input: text prompt processed by Grounding DINO and box prompt from ground-truth 2D bounding box. We evaluate our model on KITTI, SUNRGBD, and ARKitScenes datasets with the same zero-shot categories as OVMono3D [74]. As shown in Table 1 (left), our DetAny3D demonstrates superior zero-shot adaptation performance compared to the OVMono3D baseline. 
When using Grounding DINO for text prompt input, our method achieves significant improvements of $21.02\\mathrm{AP}_{3\\mathrm{D}}$ on KITTI, $4.29\\mathrm{AP}_{3\\mathrm{D}}$ on SUNRGBD, and $11.35\\mathrm{AP}_{3\\mathrm{D}}$ on ARKitScenes under the target-aware metric. When using 2D ground-truth as box prompt input, DetAny3D attains $28.96\\mathrm{AP}_{3\\mathrm{D}}$ on KITTI, $39.09\\mathrm{AP}_{3\\mathrm{D}}$ on SUNRGBD, and $57.72\\mathrm{AP}_{3\\mathrm{D}}$ on ARKitScenes, showing $3.4\\times$ , $2.3\\times$ , and $4.1\\times$ gains over the baseline, respectively. This substantial performance gap highlights our method's enhanced ability to generalize to novel object categories.", + "bbox": [ + 511, + 294, + 908, + 566 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Zero-shot Camera Performance. To assess robustness against novel camera parameters, we conduct cross-dataset evaluation as shown in Table 1 (right). For Cityscapes3D and Waymo, We use Cube R-CNN's 2D detections and ground-truth as box prompt and Grounding DINO processed text prompt for comparison. For 3RScan, due to namespace inconsistency with Cube R-CNN's predefined categories and the presence of novel classes, we only use text prompt and ground-truth box prompts, benchmarking against OVMono3D. DetAny3D exhibits strong adaptation to unseen camera configurations. When using Cube R-CNN-aligned prompts, our model achieves $\\mathrm{AP}_{3\\mathrm{D}}$ scores of 10.33 and 15.17 on Cityscapes3D and Waymo, respectively, surpassing Cube R-CNN by +2.11 and +5.74. With text prompts, under identical settings as OVMono3D [74], our method improves $\\mathrm{AP}_{3\\mathrm{D}}$ by +4.73 on Cityscapes3D, +5.68 on Waymo, and +1.1 on 3RScan under target-aware metrics. Both models show low scores on conventional metrics for 3RScan due to severe naming ambiguity and missing annotations. 
Using 2D ground-truth as box prompts, DetAny3D attains $\\mathrm{AP}_{3\\mathrm{D}}$ of 16.88, 15.83, and 21.36 across the three datasets, outperforming OVMono3D by +6.82, +5.6,", + "bbox": [ + 511, + 569, + 908, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/446c14e45132adeb8fa7de90a1ee31c1dd609b2e2e31f19f391ca1a5296821f4.jpg", + "table_caption": [ + "Table 1. Zero-shot 3D detection performance comparison on novel categories (left) and novel cameras (right). Results report $\\mathrm{AP}_{\\mathrm{3D}}$ with different prompt strategies: (1) Cube R-CNN, (2) Grounding DINO outputs (traditional metric / target-aware metric) and (3) Ground Truth. Target-aware metric uses per-image existing categories for prompting." + ], + "table_footnote": [], + "table_body": "
PromptMethodNovel CategoriesNovel Cameras
APkit3DAPsun3DAPpark3DAPcity3DAPwym3DAP3rs3D
-Cube R-CNN [8]---8.229.43-
Cube R-CNNOVMono3D [74]---4.9710.89-
DetAny3D (ours)---10.3315.17-
Δ---+5.36+4.28-
Grounding DINOOVMono3D [74]4.71 / 4.714.07 / 16.7813.21 / 13.215.88 / 10.989.20 / 10.270.37 / 8.48
DetAny3D (ours)25.73 / 25.737.63 / 21.0724.56 / 24.5611.05 / 15.7115.38 / 15.950.65 / 9.58
Δ+21.02 / +21.02+3.56 / +4.29+11.35 / +11.35+5.17 / +4.73+6.18 / +5.68+0.28 / +1.10
Ground TruthOVMono3D [74]8.4417.1614.1210.0610.2318.05
DetAny3D (ours)28.9639.0957.7216.8815.8321.36
Δ+20.52+21.93+43.60+6.82+5.60+3.31
", + "bbox": [ + 94, + 132, + 903, + 335 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d47015f2b7753c66309425332a31d6c53d79bb5088b122a1302179d821678c08.jpg", + "table_caption": [ + "Table 2. In-domain performance comparison between DetAny3D and baselines. The first three columns show results trained only on NuScenes and KITTI, while the next seven columns show results trained on the unified dataset. Two prompt sources are used: (1) Cube R-CNN 2D detections, (2) Ground Truth." + ], + "table_footnote": [], + "table_body": "
MethodOmni3D_OUTOMni3D
APkit3D↑APnus3D↑APout3D↑APkit3D↑APnus3D↑APsun3D↑APark3D↑APobj3D↑APhyp3D↑AP3D↑
ImVoxelNet [58]23.523.421.5------9.4
SMOKE [45]25.920.420.0------10.4
OV-Uni3DETR [68]35.133.031.6-------
Cube R-CNN [8]36.032.731.932.5030.0615.3341.7350.847.4823.26
OVMono3D [74]w/Cube RCNN---25.4524.3315.2041.6058.877.7522.98
DetAny3D (ours)w/Cube RCNN35.833.932.231.6130.9718.9646.1354.427.1724.92
OVMono3D [74]w/Ground Truth---33.6923.7927.8340.8556.6411.9925.32
DetAny3D (ours)w/Ground Truth38.036.735.938.6837.5546.1450.6256.8215.9834.38
", + "bbox": [ + 94, + 382, + 903, + 534 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "and $+3.31$ , respectively. These results highlight the effectiveness of our architecture and its potential for real-world applications with arbitrary camera configurations.", + "bbox": [ + 89, + 547, + 482, + 593 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In-domain Performance We also evaluate our model's in-domain detection capability using two prompt sources: 2D detections from Cube R-CNN and 2D ground-truth. Besides the unified model, we also train DetAny3D on Omni3D_out for comparison. As shown in Table 2, DetAny3D achieves competitive results with Cube R-CNN when provided with aligned input. Using GT prompts, DetAny3D outperforms OVMono3D by $9.06\\mathrm{AP}_{3\\mathrm{D}}$ , indicating that Cube R-CNN may bottleneck performance, and stronger 2D prompts could further boost results.", + "bbox": [ + 89, + 594, + 482, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Possible Applications of DetAny3D", + "text_level": 1, + "bbox": [ + 89, + 756, + 393, + 772 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Other than robustly detecting diverse corner cases in real-world tasks such as autonomous driving and embodied perception, DetAny3D's open-world detection results can further serve as inputs for advanced downstream tasks.", + "bbox": [ + 89, + 777, + 482, + 839 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3D Bounding Box Guided Video Generation. We feed DetAny3D outputs into Sora for zero-shot, open-world 3D box guided video generation. As shown in Figure 3, we compare: (i) image + 3D box + text, (ii) image + 2D box +", + "bbox": [ + 89, + 839, + 483, + 901 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ed02c5299eea3f9d7fbc6b3c62c743a443009cb116b41fb478d5e440aac3c5ac.jpg", + "image_caption": [ + "Figure 3. Zero-Shot Transfer Video Generation via Sora. 
We provide Sora with Internet-sourced images. As shown, when controlled with 3D bounding box, Sora can better capture the scene's geometric relationships. In contrast, with only controlled by 2D bounding box prompt, Sora respects pixel-level spatial cues but fails to generate accurate geometric offset." + ], + "image_footnote": [], + "bbox": [ + 516, + 551, + 906, + 790 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6a784baaa6ef36e1634481d27759d2e17f84374a49fb217170a3a5e045d2c4ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 107, + 88, + 356, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8e233f369a30d7442cf51e6289fad6a16356f81be236d69265f5e593d7e2a865.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 89, + 620, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/84b06d4981e3cbde7098dfcd8a1289b9dea2d415eaa35bc9e8fb96624d9892b4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 637, + 90, + 887, + 224 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/163cf4992709a31605b860aa94bd7ad353ee5991161c95956f4b52deca9d5aaa.jpg", + "image_caption": [ + "Figure 4. Qualitative Results. We present qualitative examples from open-world detection. In each pair of images, the top row is produced by OVMono3D, and the bottom row by DetAny3D. For each example, the left sub-figure overlays the projected 3D bounding boxes, while the right sub-figure shows the corresponding bird's-eye view with $1\\mathrm{m} \\times 1\\mathrm{m}$ grids as the background." + ], + "image_footnote": [], + "bbox": [ + 107, + 227, + 887, + 364 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/8e4047d9fac1f5250a5333356177ee2f9ce5fdb7d68739d4655955b268b10c54.jpg", + "table_caption": [ + "Table 3. Ablation study of DetAny3D. 
The table shows the impact of different design choices on $\\mathrm{AP}_{\\mathrm{3D}}$ performance. Each component is progressively added. To save resources, ablations are conducted on $10\\%$ of the full training dataset." + ], + "table_footnote": [], + "table_body": "
Depth&Cam.Merge DINO2D Agg.ZEMAP3D ↑
----5.81
---10.10
--20.20
-23.21
25.80
", + "bbox": [ + 94, + 502, + 483, + 597 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "text, and (iii) image + text. With 3D box constraints, Sora generates videos better aligned with intent.", + "bbox": [ + 89, + 607, + 482, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Ablation Studies", + "text_level": 1, + "bbox": [ + 89, + 648, + 254, + 662 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Table 3, we ablate key components of DetAny3D, showing the evolution from a SAM-based baseline to DetAny3D with strong 3D generalization. The base model extends SAM with 3D box tokens and a 3D head for direct box prediction. Additional ablations, including backbone and prompt types, are in Supplementary Section 9.", + "bbox": [ + 89, + 670, + 482, + 760 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Effectiveness of Depth & Camera Modules. Depth map provides denser supervision, while camera configuration intrinsic help mitigate disruptions caused by multiple datasets training. Integrating both depth map and camera intrinsic yields improvement in 3D feature extraction and generalization across diverse datasets.", + "- Effectiveness of Merging Depth-Pretrained DINO. Incorporating depth-pretrained DINO yields remarkable improvements, demonstrating that the rich geometric in" + ], + "bbox": [ + 89, + 763, + 483, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "formation from DINO effectively compensates for SAM's limited geometric understanding.", + "bbox": [ + 526, + 436, + 906, + 467 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Effectiveness of 2D Aggregator. Compared to directly adding the features from two models, the 2D Aggregator reduces conflicts between different foundation models, further unleashing the performance gains from two foundation model integration.", + "- Effectiveness of ZEM. 
ZEM mechanism integrates geometric features through zero-initialized layers, which enables stable 2D-to-3D knowledge transfer during training across datasets with diverse camera parameters, scenes, and depth distributions." + ], + "bbox": [ + 513, + 468, + 906, + 618 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Qualitative Results", + "text_level": 1, + "bbox": [ + 511, + 628, + 694, + 643 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We provide qualitative comparisons with OVMono3D. As shown in Figure 4, our model predicts more accurate intrinsics when the camera parameters are unknown and infers more consistent camera poses and 3D detections.", + "bbox": [ + 511, + 650, + 906, + 710 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusions", + "text_level": 1, + "bbox": [ + 511, + 724, + 638, + 739 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We propose DetAny3D, a promptable 3D detection foundation model that can detect arbitrary 3D objects from any monocular image input. DetAny3D exhibits significant zero-shot detection capabilities across diverse domains and effective zero-shot transfer across various tasks, highlighting its suitability for real-world deployment in dynamic and unstructured environments. Moreover, its flexible and robust detection ability opens the door to gathering large-scale, multi-source data for more 3D perception-guided tasks, paving the way toward open-world systems.", + "bbox": [ + 511, + 750, + 906, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 91, + 90, + 258, + 107 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We sincerely thank Jiazhi Yang, Tianyu Li, Haochen Tian, Jisong Cai, and Li Chen for their invaluable discussions and constructive feedback throughout this project. 
Their insights and expertise have contributed significantly to the success of this work. We also appreciate the continuous support and encouragement from all the members of OpenDriveLab. This work is supported by the National Key Research and Development Program of China (2024YFE0210700), the National Natural Science Foundation of China (NSFC) under Grants 62206172 and 62432008, and the Shanghai Artificial Intelligence Laboratory. It is also partially funded by Meituan Inc.", + "bbox": [ + 89, + 114, + 485, + 297 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 314, + 187, + 330 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 3", + "[2] Adel Ahmadyan, Liangkai Zhang, Artsiom Ablavatski, Jianing Wei, and Matthias Grundmann. Objectron: A large scale dataset of object-centric videos in the wild with pose annotations. In CVPR, 2021. 6, 13", + "[3] Umar Asif, Jianbin Tang, and Stefan Harrer. Graspnet: An efficient convolutional neural network for real-time grasp detection for low-powered devices. In IJCAI, 2018. 2", + "[4] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Yuri Feigin, Peter Fu, Thomas Gebauer, Daniel Kurz, Tal Dimry, Brandon Joffe, Arik Schwartz, et al. Arkitsscenes: A diverse real-world dataset for 3d indoor scene understanding using mobile rgb-d data. In NeurIPS Datasets, 2021. 6, 13", + "[5] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024.3", + "[6] Georg Biegelbauer and Markus Vincze. 
Efficient 3d object detection by fitting superquadrics to range image data for robot's object manipulation. In ICRA, 2007. 2", + "[7] Aleksei Bochkovskii, Amaël Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv preprint arXiv:2410.02073, 2024. 2", + "[8] Garrick Brazil, Abhinav Kumar, Julian Straub, Nikhila Ravi, Justin Johnson, and Georgia Gkioxari. Omni3d: A large benchmark and model for 3d object detection in the wild. In CVPR, 2023. 2, 3, 5, 6, 7, 13", + "[9] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In CVPR, 2020. 3, 6, 13" + ], + "bbox": [ + 99, + 339, + 482, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[10] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In ICCV, 2021. 2", + "[11] Sergio Casas, Abbas Sadat, and Raquel Urtasun. Mp3: A unified model to map, perceive, predict and plan. In CVPR, 2021. 2", + "[12] Li Chen, Penghao Wu, Kashyap Chitta, Bernhard Jaeger, Andreas Geiger, and Hongyang Li. End-to-end autonomous driving: Challenges and frontiers. IEEE TPAMI, 2024. 2", + "[13] Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In CVPR, 2024. 3", + "[14] Xiaozhi Chen, Kaustav Kundu, Ziyu Zhang, Huimin Ma, Sanja Fidler, and Raquel Urtasun. Monocular 3d object detection for autonomous driving. In CVPR, 2016. 2, 3", + "[15] Xiaozhi Chen, Huimin Ma, Ji Wan, Bo Li, and Tian Xia. Multi-view 3d object detection network for autonomous driving. In CVPR, 2017. 
2", + "[16] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. In ICLR, 2023. 4", + "[17] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In CVPR, 2017. 6", + "[18] Saumitro Dasgupta, Kuan Fang, Kevin Chen, and Silvio Savarese. Delay: Robust spatial layout estimation for cluttered indoor scenes. In CVPR, 2016. 3", + "[19] David Eigen, Christian Puhrsch, and Rob Fergus. Depth map prediction from a single image using a multi-scale deep network. In NeurIPS, 2014. 5", + "[20] Hao-Shu Fang, Chenxi Wang, Minghao Gou, and Cewu Lu. Graspnet-1billion: A large-scale benchmark for general object grasping. In CVPR, 2020. 2", + "[21] Nils Gählert, Nicolas Jourdan, Marius Cordts, Uwe Franke, and Joachim Denzler. Cityscapes 3d: Dataset and benchmark for 9 dof vehicle detection. arXiv preprint arXiv:2006.07864, 2020. 6", + "[22] Ruiyuan Gao, Kai Chen, Enze Xie, HONG Lanqing, Zhenguo Li, Dit-Yan Yeung, and Qiang Xu. Magicdrive: Street view generation with diverse 3d geometry control. In ICLR, 2023. 2", + "[23] Ruiyuan Gao, Kai Chen, Zhihao Li, Lanqing Hong, Zhenguo Li, and Qiang Xu. Magicdrive3d: Controllable 3d generation for any-view rendering in street scenes. arXiv preprint arXiv:2405.14475, 2024. 2", + "[24] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. *IJRR*, 2013. 3, 6, 13", + "[25] Jakob Geyer, Yohannes Kassahun, Mentor Mahmudi, Xavier Ricou, Rupesh Durgesh, Andrew S Chung, Lorenz Hauswald, Viet Hoang Pham, Maximilian Mühlegg, Sebastian Dorn, et al. A2d2: Audi autonomous driving dataset. arXiv preprint arXiv:2004.06320, 2020. 
6" + ], + "bbox": [ + 516, + 92, + 903, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[26] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024. 3", + "[27] Ziyu Guo*, Renrui Zhang*, Xiangyang Zhu, Yiwen Tang, Xianzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023. 3", + "[28] Ziyu Guo*, Renrui Zhang*#, Xiangyang Zhu, Chengzhuo Tong, Peng Gao, Chunyuan Li, and Pheng-Ann Heng. Sam2point: Segment any 3d as videos in zero-shot and promptable manners. arXiv preprint arXiv:2408.16768, 2024.3", + "[29] Ziyu Guo, Renrui Zhang, Chengzhuo Tong, Zhizheng Zhao, Peng Gao, Hongsheng Li, and Pheng-Ann Heng. Can we generate images with cot? let's verify and reinforce image generation step by step. arXiv preprint arXiv:2501.13926, 2025.3", + "[30] Xiankang He, Guangkai Xu, Bo Zhang, Hao Chen, Ying Cui, and Dongyan Guo. Diffcalib: Reformulating monocular camera calibration as diffusion-based dense incident map generation. arXiv preprint arXiv: 2405.15619, 2024. 5", + "[31] Yihan Hu, Jiazhi Yang, Li Chen, Keyu Li, Chonghao Sima, Xizhou Zhu, Siqi Chai, Senyao Du, Tianwei Lin, Wenhai Wang, et al. Planning-oriented autonomous driving. In CVPR, 2023. 2", + "[32] Jin-Cheng Jhang, Tao Tu, Fu-En Wang, Ke Zhang, Min Sun, and Cheng-Hao Kuo. V-mind: Building versatile monocular indoor 3d detector with diverse 2d annotations. In WACV, 2025. 
3", + "[33] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In ICCV, 2023. 2, 3, 6", + "[34] Tobias Koch, Lukas Liebel, Friedrich Fraundorfer, and Marco Korner. Evaluation of cnn-based single-image depth estimation methods. In ECCVW, 2018. 6", + "[35] Maksim Kolodiazhnyi, Anna Vorontsova, Matvey Skripkin, Danila Rukhovich, and Anton Konushin. Unidet3d: Multi-dataset indoor 3d object detection. arXiv preprint arXiv:2409.04234, 2024. 2", + "[36] Buyu Li, Wanli Ouyang, Lu Sheng, Xingyu Zeng, and Xiaogang Wang. Gs3d: An efficient 3d object detection framework for autonomous driving. In CVPR, 2019. 2", + "[37] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In CVPR, 2022. 3", + "[38] Xiaofan Li, Yifu Zhang, and Xiaoqing Ye. Drivingdiffusion: Layout-guided multi-view driving scenarios video generation with latent diffusion model. In European Conference on Computer Vision, 2024. 2", + "[39] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Qiao Yu, and Jifeng Dai. Bevformer:" + ], + "bbox": [ + 91, + 90, + 485, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "learning bird's-eye-view representation from lidar-camera via spatiotemporal transformers. IEEE TPAMI, 2024. 3", + "[40] Zhuoling Li, Xiaogang Xu, SerNam Lim, and Hengshuang Zhao. Unimode: Unified monocular 3d object detection. In CVPR, 2024. 2, 3, 5", + "[41] Tingting Liang, Hongwei Xie, Kaicheng Yu, Zhongyu Xia, Zhiwei Lin, Yongtao Wang, Tao Tang, Bing Wang, and Zhi Tang. Bevfusion: A simple and robust lidar-camera fusion framework. In NeurIPS, 2022. 3", + "[42] Xuewu Lin, Tianwei Lin, Zixiang Pei, Lichao Huang, and Zhizhong Su. 
Sparse4d: Multi-view 3d object detection with sparse spatial-temporal fusion. arXiv preprint arXiv:2211.10581, 2022. 3", + "[43] Luyang Liu, Hongyu Li, and Marco Gruteser. Edge assisted real-time object detection for mobile augmented reality. In MobiCom, 2019. 2", + "[44] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In ECCV, 2024. 2, 3, 5, 6", + "[45] Zechen Liu, Zizhang Wu, and Roland Tóth. Smoke: Single-stage monocular 3d object detection via keypoint estimation. In CVPRW, 2020. 3, 7", + "[46] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 6", + "[47] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6", + "[48] Xinzhu Ma, Wanli Ouyang, Andrea Simonelli, and Elisa Ricci. 3d object detection from images for autonomous driving: a survey. IEEE TPAMI, 2023. 2", + "[49] Jiageng Mao, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. 3d object detection for autonomous driving: A comprehensive survey. IJCV, 2023. 2", + "[50] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In CVPR, 2020. 2", + "[51] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. TMLR, 2024. 2, 3, 6", + "[52] Youngmin Park, Vincent Lepetit, and Woontack Woo. Multiple 3d object tracking for augmented reality. In ISMAR, 2008. 
2", + "[53] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In NeurIPS, 2019. 6", + "[54] Luigi Piccinelli, Yung-Hsu Yang, Christos Sakaridis, Mattia Segu, Siyuan Li, Luc Van Gool, and Fisher Yu. Unidepth: Universal monocular metric depth estimation. In CVPR, 2024. 2, 3, 4, 5, 6, 14" + ], + "bbox": [ + 516, + 92, + 903, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 509, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[55] Zhangyang Qi, Zhixiong Zhang, Ye Fang, Jiaqi Wang, and Hengshuang Zhao. Gpt4scene: Understand 3d scenes from videos with vision-language models. arXiv preprint arXiv:2501.01428, 2025. 3", + "[56] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 2, 3", + "[57] Mike Roberts, Jason Ramapuram, Anurag Ranjan, Atulit Kumar, Miguel Angel Bautista, Nathan Paczan, Russ Webb, and Joshua M Susskind. Hypersim: A photorealistic synthetic dataset for holistic indoor scene understanding. In ICCV, 2021. 6, 13", + "[58] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In WACV, 2022. 7", + "[59] Daniel Scharstein and Richard Szeliski. A taxonomy and evaluation of dense two-frame stereo correspondence algorithms. IJCV, 2002. 6", + "[60] Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens Beiwenger, Ping Luo, Andreas Geiger, and Hongyang Li. Drivelm: Driving with graph visual question answering. In ECCV, 2024. 
3", + "[61] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In CVPR, 2015. 6, 13", + "[62] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In CVPR, 2020. 6", + "[63] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 3", + "[64] Benjamin Ummenhofer, Huizhong Zhou, Jonas Uhrig, Nikolaus Mayer, Eddy Ilg, Alexey Dosovitskiy, and Thomas Brox. Demon: Depth and motion network for learning monocular stereo. In CVPR, 2017. 5", + "[65] Johanna Wald, Armen Avetisyan, Nassir Navab, Federico Tombari, and Matthias Nießner. Rio: 3d object instance re-localization in changing indoor environments. In ICCV, 2019. 6, 15", + "[66] Tai Wang, Xinge Zhu, Jiangmiao Pang, and Dahua Lin. Fcos3d: Fully convolutional one-stage monocular 3d object detection. In ICCV, 2021. 3, 5", + "[67] Tai Wang, Xiaohan Mao, Chenming Zhu, Runsen Xu, Ruiyuan Lyu, Peisen Li, Xiao Chen, Wenwei Zhang, Kai Chen, Tianfan Xue, et al. Embodiedscan: A holistic multimodal 3d perception suite towards embodied ai. In CVPR, 2024. 2", + "[68] Zhenyu Wang, Ya-Li Li, Xi Chen, Hengshuang Zhao, and Shengjin Wang. Uni3detr: Unified 3d detection transformer. In NeurIPS, 2023. 2, 7" + ], + "bbox": [ + 91, + 90, + 480, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[69] Zhenyu Wang, Yali Li, Taichi Liu, Hengshuang Zhao, and Shengjin Wang. Ov-uni3detr: Towards unified open-vocabulary 3d object detection via cycle-modality propagation. In ECCV, 2024. 
3", + "[70] Benjamin Wilson, William Qi, Tanmay Agarwal, John Lambert, Jagjeet Singh, Siddhesh Khandelwal, Bowen Pan, Ratnesh Kumar, Andrew Hartnett, Jhony Kaesemodel Pontes, et al. Argoverse 2: Next generation datasets for self-driving perception and forecasting. In NeurIPS Datasets, 2023. 6", + "[71] Guorun Yang, Xiao Song, Chaoqin Huang, Zhidong Deng, Jianping Shi, and Bolei Zhou. Drivingstereo: A large-scale dataset for stereo matching in autonomous driving scenarios. In CVPR, 2019. 6", + "[72] Jie Yang, Bingliang Li, Ailing Zeng, Lei Zhang, and Ruimao Zhang. Open-world human-object interaction detection via multi-modal prompts. In CVPR, 2024. 3", + "[73] Xiuyu Yang, Yunze Man, Junkun Chen, and Yu-Xiong Wang. Scenecraft: Layout-guided 3d scene generation. In NeurIPS, 2025. 2", + "[74] Jin Yao, Hao Gu, Xuweiyi Chen, Jiayun Wang, and Zezhou Cheng. Open vocabulary monocular 3d object detection. arXiv preprint arXiv:2411.16833, 2024. 2, 3, 5, 6, 7, 13, 15", + "[75] Kaixin Yao, Longwen Zhang, Xinhao Yan, Yan Zeng, Qixuan Zhang, Lan Xu, Wei Yang, Jiayuan Gu, and Jingyi Yu. Cast: Component-aligned 3d scene reconstruction from anrgb image. arXiv preprint arXiv:2502.12894, 2025. 2", + "[76] Wei Yin, Chi Zhang, Hao Chen, Zhipeng Cai, Gang Yu, Kaixuan Wang, Xiaozhi Chen, and Chunhua Shen. Metric3d: Towards zero-shot metric 3d prediction from a single image. In ICCV, 2023. 2", + "[77] Amir R Zamir, Alexander Sax, William Shen, Leonidas J Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In CVPR, 2018. 6", + "[78] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In CVPR, 2022. 3", + "[79] Renrui Zhang, Zhengkai Jiang, Ziyu Guo, Shilin Yan, Junting Pan, Hao Dong, Peng Gao, and Hongsheng Li. Personalize segment anything model with one shot. *ICLR*, 2023. 
3", + "[80] Renrui Zhang, Han Qiu, Tai Wang, Ziyu Guo, Ziteng Cui, Yu Qiao, Hongsheng Li, and Peng Gao. Monodetr: Depth-guided transformer for monocular 3d object detection. In ICCV, 2023. 3, 5", + "[81] Renrui Zhang, Jiaming Han, Chris Liu, Aojun Zhou, Pan Lu, Yu Qiao, Hongsheng Li, and Peng Gao. Llama-adapter: Efficient fine-tuning of large language models with zero-initialized attention. In ICLR, 2024. 3", + "[82] Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Ziyu Guo, Shicheng Li, Yichi Zhang, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, et al. Mavis: Mathematical visual instruction tuning with an automatic data engine. arXiv preprint arXiv:2407.08739, 2024. 3", + "[83] Haoyi Zhu, Honghui Yang, Xiaoyang Wu, Di Huang, Sha Zhang, Xianglong He, Hengshuang Zhao, Chunhua Shen, Yu" + ], + "bbox": [ + 516, + 90, + 903, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Qiao, Tong He, et al. Ponderv2: Pave the way for 3d foundation model with a universal pre-training paradigm. arXiv preprint arXiv:2310.08586, 2023. 3", + "[84] Menglong Zhu, Konstantinos G Derpanis, Yinfei Yang, Samarth Brahmbhatt, Mabel Zhang, Cody Phillips, Matthieu Lecce, and Kostas Daniilidis. Single image 3d object detection and pose estimation for grasping. In ICRA, 2014. 2", + "[85] Ziyu Zhu, Zhuofan Zhang, Xiaojian Ma, Xuesong Niu, Yixin Chen, Baoxiong Jia, Zhidong Deng, Siyuan Huang, and Qing Li. Unifying 3d vision-language understanding via promptable queries. In ECCV, 2024. 3", + "[86] Yiming Zuo, Karhan Kayan, Maggie Wang, Kevin Jeon, Jia Deng, and Thomas L Griffiths. Towards foundation models for 3d vision: How close are we? arXiv preprint arXiv:2410.10799, 2024. 
2" + ], + "bbox": [ + 91, + 90, + 482, + 301 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Detect Anything 3D in the Wild Supplementary Material", + "text_level": 1, + "bbox": [ + 338, + 85, + 658, + 138 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6. DA3D", + "text_level": 1, + "bbox": [ + 89, + 155, + 166, + 170 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "DA3D is a unified 3D detection dataset, consists of 16 diverse datasets. It builds upon six datasets in Omni3D—Hypersim [57], ARKitScenes [4], Objectron [2], SUNRGBD [61], KITTI [24], and nuScenes [9]—while partially incorporating an additional 10 datasets to further enhance the scale, diversity, and generalization capabilities of 3D detection models. As shown in Figure 5, DA3D comprises 0.4 million frames ( $2.5 \\times$ the scale of Omni3D), spanning 20 distinct camera configurations.", + "bbox": [ + 89, + 183, + 483, + 319 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The dataset is standardized with the similar structure to Omni3D [8], including monocular RGB images, camera intrinsics, 3D bounding boxes, and depth maps. DA3D is designed to test 3D detection models across a wide variety of environments, camera configurations, and object categories, offering a more comprehensive evaluation setting.", + "bbox": [ + 89, + 321, + 483, + 412 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6.1. Dataset Composition", + "text_level": 1, + "bbox": [ + 89, + 430, + 287, + 446 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We categorize the datasets in DA3D based on two aspects:", + "bbox": [ + 89, + 454, + 477, + 469 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Indoor vs. Outdoor. As shown in Figure 6 (left), DA3D expands both indoor and outdoor datasets compared to Omni3D. 
Additionally, the ratio of indoor to outdoor data in DA3D is more balanced than in Omni3D, ensuring a more representative distribution for models trained across diverse environments.", + "bbox": [ + 89, + 470, + 483, + 561 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Supervision Types. We also analyze DA3D in terms of the distribution of supervision types (See Figure 6 (right)):", + "bbox": [ + 89, + 565, + 482, + 595 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $35\\%$ data provides only depth supervision.", + "- $23\\%$ data provide only 3D bounding box annotations.", + "- $42\\%$ data contains both depth maps and 3D bounding boxes.", + "- Intrinsic parameters are available for all data." + ], + "bbox": [ + 91, + 598, + 480, + 672 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6.2. Dataset Splits.", + "text_level": 1, + "bbox": [ + 89, + 689, + 236, + 705 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For training and evaluation, we follow the dataset splitting strategy used in prior works [8]. Specifically:", + "bbox": [ + 89, + 714, + 482, + 744 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We construct the training set by merging training subsets from the original datasets.", + "- We form the validation set by sampling from the original training data, ensuring balanced representation.", + "- We use the original validation sets of each dataset as the test set, allowing for direct comparison with previous benchmarks." 
+ ], + "bbox": [ + 91, + 747, + 482, + 851 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This setup ensures fair evaluation and maintains consistency with existing benchmarks while assessing both indomain and zero-shot generalization capabilities.", + "bbox": [ + 89, + 854, + 483, + 901 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/030cd2830111b994f5772d412ec9b32d8c117feabb525c49f2d8dc8a61fd4064.jpg", + "image_caption": [ + "Figure 5. The composition of the DA3D dataset." + ], + "image_footnote": [], + "bbox": [ + 521, + 159, + 898, + 364 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/f51fa1c38c8264dda2a24ac091ec28f700980b6116fcae49b6d6257c519fa399.jpg", + "image_caption": [ + "Figure 6. The data distribution of the DA3D dataset. (left): the statistics of indoor and outdoor data. (right): the statistics of data with different supervision categories." + ], + "image_footnote": [], + "bbox": [ + 524, + 430, + 890, + 545 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6.3. Evaluation Setup", + "text_level": 1, + "bbox": [ + 513, + 646, + 684, + 662 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "DA3D is designed to evaluate zero-shot generalization in both novel object categories and novel camera configurations. We define two evaluation settings:", + "bbox": [ + 511, + 669, + 905, + 714 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Zero-Shot Categories. 
Following prior work [74], we select partial categories from KITTI, SUNRGBD, and ARKitScenes as unseen classes for zero-shot testing.", + "bbox": [ + 511, + 715, + 905, + 760 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Zero-Shot Datasets.", + "text_level": 1, + "bbox": [ + 513, + 762, + 656, + 773 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We use Cityscapes3D, Waymo, and 3RScan as unseen datasets with novel camera configurations.", + "- Cityscapes3D & Waymo introduce new intrinsics and image styles, challenging models to generalize across different camera setups.", + "- 3RScan not only introduces novel camera setups, but also contains unseen object categories, making it useful for testing both category and camera generalization." + ], + "bbox": [ + 513, + 777, + 903, + 898 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/21e1019616c40f2263d401db5f334715e72f37dfeb65f72de813273f4361ab13.jpg", + "image_caption": [ + "Figure 7. Detailed implementation of camera and depth module from UniDepth." + ], + "image_footnote": [], + "bbox": [ + 91, + 119, + 480, + 258 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7. Model Details", + "text_level": 1, + "bbox": [ + 89, + 353, + 230, + 369 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7.1. 
Camera and Depth Module Details", + "text_level": 1, + "bbox": [ + 89, + 378, + 393, + 395 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This section introduces how the camera module and depth module work, predicting intrinsic and camera-aware depth, also related feature.", + "bbox": [ + 89, + 400, + 482, + 444 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As show in Figure 7, the fused feature $\\hat{\\mathbf{F}}_{\\mathrm{fused}}$ are input into the camera module, which uses a cross-attention mechanism and a to obtain the camera intrinsic parameters. These intrinsic parameters are then used to generate camera rays. The rays are defined as:", + "bbox": [ + 89, + 445, + 483, + 521 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n(r _ {1}, r _ {2}, r _ {3}) = \\mathbf {K} ^ {- 1} \\left[ \\begin{array}{l} u \\\\ v \\\\ 1 \\end{array} \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 531, + 367, + 580 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\mathbf{K}$ is the calibration matrix, $u$ and $v$ are the pixel coordinates, and 1 is a vector of ones. In this context, the homogeneous camera rays $(r_x,r_y)$ are derived from:", + "bbox": [ + 89, + 585, + 483, + 633 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\left( \\begin{array}{c} r _ {1} \\\\ \\hline r _ {3} \\end{array} , \\frac {r _ {2}}{r _ {3}}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 641, + 321, + 675 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This dense representation of the camera rays undergoes Laplace Spherical Harmonic Encoding (SHE) [54] to produce the embeddings $\\mathbf{C}$ . 
These embeddings are then passed to the depth module using the cross-attention mechanism.", + "bbox": [ + 89, + 680, + 482, + 739 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The depth feature conditioned on the camera embeddings, is computed as:", + "bbox": [ + 89, + 739, + 482, + 771 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {D} \\mid \\mathbf {C} = \\operatorname {M L P} (\\operatorname {C r o s s A t t n} (\\mathbf {D}, \\mathbf {C}))\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 785, + 403, + 801 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Subsequently, the depth feature is processed through an upsampling head to predict the final depth map.", + "bbox": [ + 89, + 808, + 482, + 839 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7.2.3D Box Head Details", + "text_level": 1, + "bbox": [ + 89, + 848, + 287, + 863 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This section introduces the details of the 3D box head. After the query $\\mathbf{Q}$ passes through the Geometric Transformer", + "bbox": [ + 89, + 869, + 483, + 901 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/8830df15b8218b952f02e4a337c426522f4241f7686a07e520d9210153f77c60.jpg", + "image_caption": [ + "Figure 8. 3D Box head details." + ], + "image_footnote": [], + "bbox": [ + 547, + 93, + 875, + 256 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "and Two-Way Transformer, the model outputs $\\mathbf{O}$ . $\\mathbf{O}$ contains outputs corresponding to both 3D-related hidden states $\\mathbf{O}_{3D}$ and prompt hidden states $\\mathbf{O}_p$ . 
We extract the 3D-related output $\\mathbf{O}_{3D}$ for further processing.", + "bbox": [ + 511, + 311, + 903, + 372 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Subsequently, $\\mathbf{O}_{3\\mathrm{D}}$ is passed through a series of prediction heads as shown in Figure 8.", + "bbox": [ + 511, + 372, + 903, + 402 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We then transform these predictions into the final 3D bounding box parameters and obtain the 3D bounding box $(x,y,z,w,h,l,R,S)$ for each detected object, where $(x,y,z)$ denotes the 3D center, $(w,h,l)$ represent the dimensions, and $(R,S)$ describe the rotation and predicted 3D IoU score.", + "bbox": [ + 511, + 402, + 905, + 492 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7.3. Loss Details", + "text_level": 1, + "bbox": [ + 513, + 503, + 643, + 518 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Depth Loss. The depth module is supervised using the Scale-Invariant Logarithmic (SILog) loss, defined as:", + "bbox": [ + 511, + 526, + 903, + 556 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {d e p t h}} = \\sqrt {\\frac {1}{N} \\sum_ {i = 1} ^ {N} \\Delta d _ {i} ^ {2} - 0 . 1 5 \\cdot \\left(\\frac {1}{N} \\sum_ {i = 1} ^ {N} \\Delta d _ {i}\\right) ^ {2}} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 527, + 568, + 903, + 616 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\Delta d_{i} = \\log (d_{i}^{\\mathrm{pred}}) - \\log (d_{i}^{\\mathrm{gt}})$ , and $N$ is the number of valid depth pixels.", + "bbox": [ + 511, + 628, + 903, + 661 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Camera Intrinsic Loss. The camera error is computed with the dense camera rays. 
For an image with height $H$ and width $W$ , the intrinsic loss is formulated as:", + "bbox": [ + 511, + 661, + 903, + 705 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {c a m}} = \\sqrt {\\frac {1}{H W} \\sum_ {i = 1} ^ {H W} \\Delta r _ {i} ^ {2} - 1 \\cdot \\left(\\frac {1}{H W} \\sum_ {i = 1} ^ {H W} \\Delta r _ {i}\\right) ^ {2}} \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 522, + 731, + 903, + 780 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\Delta r_{i} = r_{i}^{\\mathrm{pred}} - r_{i}^{\\mathrm{gt}}$", + "bbox": [ + 511, + 792, + 679, + 809 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Detection Loss. The detection loss consists of three components:", + "bbox": [ + 511, + 810, + 903, + 839 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Smooth L1 loss for box regression, covering the prediction of center, depth, and dimensions.", + "- Chamfer loss for rotation matrix prediction, ensuring accurate orientation estimation." + ], + "bbox": [ + 513, + 840, + 903, + 898 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/eee539fd5445d9fba45f510b6fe2a76ffb3eb259a829c7561442ec74d15534e6.jpg", + "image_caption": [ + "Figure 9. An example on 3RScan. The left image shows the original 3RScan annotations, while the right image presents the detection results from Grounding DINO after feeding in all the 3RScan labels. Severe naming ambiguities (e.g., \"trash can\" vs. \"rubbish bin\") and missing annotations lead to a substantial decrease in the detector's performance." 
+ ], + "image_footnote": [], + "bbox": [ + 124, + 88, + 450, + 220 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "- Mean squared error (MSE) loss for 3D IoU score prediction, which optimizes the confidence estimates of detected objects.", + "bbox": [ + 89, + 340, + 482, + 385 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Combining these terms, the total detection loss is:", + "bbox": [ + 109, + 386, + 439, + 401 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {d e t}} = \\mathcal {L} _ {\\mathrm {b o x}} + \\mathcal {L} _ {\\mathrm {r o t}} + \\mathcal {L} _ {\\mathrm {i o u}}, \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 411, + 480, + 426 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "8. Target-aware Metrics", + "text_level": 1, + "bbox": [ + 89, + 438, + 297, + 455 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In our work, we evaluate both traditional metrics and the target-aware metrics proposed by OVMono3D [74]. Under the target-aware paradigm, rather than prompting the model with all possible classes from an entire dataset, we only prompt it with the classes present in the current image during inference. This is designed to address two key challenges encountered:", + "bbox": [ + 89, + 463, + 482, + 568 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Missing annotations: Comprehensive 3D annotation is often impractical or prohibitively expensive, leading to incomplete ground-truth annotations.", + "- Naming ambiguity: Datasets may label the same objects with inconsistent category names or annotation policies, creating confusion when merging datasets." + ], + "bbox": [ + 89, + 569, + 482, + 659 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "As illustrated in Figure 9, these issues are especially pronounced in the 3RScan [65] dataset. 
The left side shows the official 3RScan annotations, while the right side shows detections from Grounding DINO, which are largely misaligned with the dataset's labeling conventions. Consequently, traditional evaluation metrics may yield misleading or inconsistent results, whereas target-aware metrics help mitigate these mismatches by restricting the evaluated classes to those actually present in the scene.", + "bbox": [ + 89, + 660, + 482, + 795 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "9. More Ablation Study", + "text_level": 1, + "bbox": [ + 89, + 808, + 290, + 825 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "9.1. Various Prompts Performance", + "text_level": 1, + "bbox": [ + 89, + 833, + 359, + 849 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this section, we evaluate different types of prompts, including box prompts, point prompts, and text prompts, both with and without intrinsic prompts. The results on Omni3D", + "bbox": [ + 89, + 854, + 483, + 900 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/2064a3cef29d98631876b710c36133004ed5bd90a00421ac59b9bd0bff893d21.jpg", + "table_caption": [ + "Table 4. Various Prompt Performance." + ], + "table_footnote": [], + "table_body": "
Prompt TypeBoxPointText
w/ Intrinsic Prompt34.3825.1922.31
w/o Intrinsic Prompt32.1624.021.02
", + "bbox": [ + 550, + 114, + 870, + 172 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/97862b908765a136856a0ed4e484ee47780443596a04ca4bb07f91b048c034cf.jpg", + "table_caption": [ + "Table 5. Ablation on different backbones. The table reports $\\mathrm{AP}_{3\\mathrm{D}}$ scores. We verify the effectiveness of SAM and DINO along two dimensions: (1) whether or not we use the pretrained SAM parameters, and (2) whether adopt the pretrained DINO backbone or ConvNeXt for the depth module." + ], + "table_footnote": [], + "table_body": "
Backbonew/ SAMw/o SAM
DINO25.8019.12
ConvNeXt23.1118.27
", + "bbox": [ + 581, + 268, + 834, + 330 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "are presented in Table 4. Each prompt type demonstrates its effectiveness in guiding 3D detection. Besides, on the zero-shot datasets, we observe that omitting intrinsic prompts leads to a significant performance drop (even approaching zero), which further highlights the critical role of intrinsic prompts for reliable depth calibration in unseen scenarios.", + "bbox": [ + 511, + 357, + 906, + 448 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "9.2. Ablation on Different Backbones", + "text_level": 1, + "bbox": [ + 511, + 458, + 802, + 474 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this section, we investigate our choice of backbone by comparing the use of SAM and DINO backbones. For DINO, we replace it with ConvNeXt and adopt the same pretraining method proposed by UniDepth. For SAM, we examine its effect by removing the SAM-pretrained weights and training from scratch. As shown in Table 5, SAM's pretrained parameters prove crucial for boosting performance. Meanwhile, compared to ConvNeXt, DINO offers richer geometric representations, resulting in stronger 3D detection performance.", + "bbox": [ + 511, + 481, + 906, + 632 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "9.3. Ablation on DA3D Dataset", + "text_level": 1, + "bbox": [ + 511, + 643, + 756, + 659 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We ablate the impact of the DA3D dataset in Tab. 6. The additional data in DA3D primarily improves generalization to novel cameras, as Omni3D contains only two distinctive intrinsics for outdoor scenes.", + "bbox": [ + 511, + 666, + 905, + 726 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 6. Ablation on training datasets. Unless specified, all models are trained on the Omni3D dataset. 
For the in-domain setting, prompts are provided by Cube R-CNN, while prompts for novel classes and novel datasets are generated by Grounding DINO.", + "bbox": [ + 511, + 738, + 905, + 794 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/7e29ff8cb32bc9588a5c6dba8caf4d5feeecdd9ecd9443329a0af0fad606e977.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodIn-domain\nAPomni3d\n3DNovel ClassNovel Camera
APkit\n3DAPsun\n3DAPcity\n3DAP3rs\n3D
Cube R-CNN23.26--8.22 / --
OVMono3D22.984.71 / 4.714.07 / 16.785.88 / 10.980.37 / 8.48
DetAny3D24.3323.75 / 23.757.63 / 20.878.31 / 11.680.64 / 9.56
DetAny3DDA3D24.9225.73 / 25.737.63 / 21.0711.05 / 15.710.65 / 9.58
", + "bbox": [ + 516, + 805, + 903, + 881 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "9.4. Ablation on Inference Speed", + "text_level": 1, + "bbox": [ + 89, + 90, + 344, + 107 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We compare the inference speed of DetAny3D with prior methods in Table 7. DetAny3D runs at 1.5 FPS on a single KITTI image, which is slower than Cube R-CNN (33.3 FPS) and OVMono3D (7.1 FPS). This is a trade-off for stronger generalization across novel categories and cameras, as DetAny3D is designed as a foundation model rather than for real-time deployment.", + "bbox": [ + 89, + 112, + 483, + 218 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/3d4fc0898db5ec9fbe8d3c6ce360302596584579310f43e349d85be44983da8a.jpg", + "table_caption": [ + "Table 7. Inference speed comparison on KITTI." + ], + "table_footnote": [], + "table_body": "
MethodCube R-CNNOVMono3DDetAny3D
FPS ↑33.37.11.5
", + "bbox": [ + 99, + 256, + 475, + 305 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "9.5. Per-category Performance on Novel Classes", + "text_level": 1, + "bbox": [ + 89, + 328, + 460, + 345 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As shown in Table 8, we provide a detailed comparison of per-category $\\mathrm{AP}_{3\\mathrm{D}}$ on novel classes from the KITTI, SUNRGBD, and ARKitScenes datasets between our DetAny3D and the baseline OVMono3D. DetAny3D shows consistent improvements across most categories.", + "bbox": [ + 89, + 349, + 483, + 426 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "10. Limitations", + "text_level": 1, + "bbox": [ + 91, + 439, + 222, + 455 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Text Prompt Process. Our method leverages open-vocabulary 2D detectors such as Grounding DINO to convert text prompts into 2D box prompts. While effective, this strategy may cause semantic loss, as textual nuances are not directly injected into the 3D detection pipeline. Moreover, 2D detectors are known to perform poorly under heavy occlusion or partial visibility, introducing a domain gap when transferring their outputs to 3D tasks.", + "bbox": [ + 89, + 465, + 483, + 585 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Inference Efficiency. Although DetAny3D achieves strong generalization across novel categories and camera settings, its inference speed (1.5 FPS) is significantly slower than existing lightweight 3D detectors. This limits its applicability in latency-sensitive scenarios such as real-time robotics or autonomous driving.", + "bbox": [ + 89, + 585, + 483, + 675 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lack of Temporal Modeling. Our current design operates on single-frame inputs and does not utilize temporal information from video sequences. 
Incorporating motion cues and enforcing temporal consistency could potentially improve detection accuracy and enable better integration into downstream video-based tasks, such as video knowledge distillation and temporal grounding.", + "bbox": [ + 89, + 676, + 483, + 782 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "11. Licenses and Privacy", + "text_level": 1, + "bbox": [ + 89, + 795, + 302, + 813 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "All data used in this work are obtained from publicly available datasets and are subject to their respective licenses.", + "bbox": [ + 89, + 820, + 480, + 851 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/63c168899add200908ab32369bdd433eff8a0a4b37295ebfefda64321412b0b9.jpg", + "table_caption": [ + "Table 8. Per-category target-aware $\\mathrm{AP}_{3\\mathrm{D}}$ comparison on novel classes between DetAny3D and OVMono3D." + ], + "table_footnote": [], + "table_body": "
CategoryOVMono3DDetAny3D
Board4.836.02
Printer16.2360.22
Painting2.805.11
Microwave30.3157.21
Tray10.116.70
Podium48.3773.65
Cart47.3133.46
Tram4.7127.90
Easy Categories20.5833.79
Monitor9.4415.95
Bag15.6117.69
Dresser29.0841.75
Keyboard9.139.52
Drawers43.0440.80
Computer7.4412.37
Kitchen Pan9.988.70
Potted Plant6.6626.34
Tissues12.4512.95
Rack10.219.04
Toys5.2416.14
Phone3.894.42
Soundsystem13.226.21
Fireplace13.1630.75
Hard Categories13.4718.05
All Categories16.0523.77
", + "bbox": [ + 547, + 295, + 874, + 731 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_model.json b/data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6a7dbc53d4ded7149ef2f806d4a06019fc7883dc --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_model.json @@ -0,0 +1,3730 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.283, + 0.058, + 0.714 + ], + "angle": 270, + "content": "arXiv:2504.07958v3 [cs.CV] 2 Dec 2025" + }, + { + "type": "title", + "bbox": [ + 0.339, + 0.131, + 0.66, + 0.154 + ], + "angle": 0, + "content": "Detect Anything 3D in the Wild" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.18, + 0.825, + 0.219 + ], + "angle": 0, + "content": "Hanxue Zhang\\(^{1,2*}\\), Haoran Jiang\\(^{1,3*}\\), Qingsong Yao\\(^{4*}\\), Yanan Sun\\(^{1}\\), Renrui Zhang\\(^{5}\\), Hao Zhao\\(^{6}\\), Hongyang Li\\(^{1}\\), Hongzi Zhu\\(^{2}\\), Zetong Yang\\(^{1,7}\\)" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.223, + 0.899, + 0.26 + ], + "angle": 0, + "content": "1 OpenDriveLab at Shanghai AI Laboratory 2 Shanghai Jiao Tong University 3 Fudan University 4 Stanford University 5 CUHK MMLab 6 Tsinghua University 7 GAC R&D Center" + }, + { + "type": "text", + "bbox": [ + 0.302, + 0.265, + 0.699, + 0.281 + ], + "angle": 0, + "content": "https://github.com/OpenDriveLab/DetAny3D" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.306, + 0.905, + 0.639 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.648, + 0.908, + 0.706 + ], + "angle": 0, + "content": "Figure 1. 
Introducing DetAny3D, a promptable 3D detection foundation model capable of detecting any 3D object with arbitrary monocular images in diverse scenes. Our framework enables multi-prompt interaction (e.g., box, point, and text) to deliver open-world 3D detection results (\\(w \\times h \\times l\\) in centimeter) for novel objects across various domains. It achieves significant zero-shot generalization, outperforming SOTA by up to 21.02 and 5.68 AP3D on novel categories and novel datasets with new camera configurations." + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.732, + 0.327, + 0.748 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.767, + 0.483, + 0.873 + ], + "angle": 0, + "content": "Despite the success of deep learning in close-set 3D object detection, existing approaches struggle with zero-shot generalization to novel objects and camera configurations. We introduce DetAny3D, a promptable 3D detection foundation model capable of detecting any novel object under arbitrary camera configurations using only monocular inputs. Training a foundation model for 3D detection is fun" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.719, + 0.907, + 0.901 + ], + "angle": 0, + "content": "damentally constrained by the limited availability of annotated 3D data, which motivates DetAny3D to leverage the rich prior knowledge embedded in extensively pre-trained 2D foundation models to compensate for this scarcity. To effectively transfer 2D knowledge to 3D, DetAny3D incorporates two core modules: the 2D Aggregator, which aligns features from different 2D foundation models, and the 3D Interpreter with Zero-Embedding Mapping, which stabilizes early training in 2D-to-3D knowledge transfer. 
Experimental results validate the strong generalization of our DetAny3D, which not only achieves state-of-the-art performance on unseen categories and novel camera configura" + }, + { + "type": "page_footnote", + "bbox": [ + 0.116, + 0.888, + 0.227, + 0.9 + ], + "angle": 0, + "content": "*Equal contribution." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.198 + ], + "angle": 0, + "content": "tions, but also surpasses most competitors on in-domain data. DetAny3D sheds light on the potential of the 3D foundation model for diverse applications in real-world scenarios, e.g., rare object detection in autonomous driving, and demonstrates promise for further exploration of 3D-centric tasks in open-world settings. More visualization results can be found at our code repository." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.226, + 0.223, + 0.243 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.251, + 0.483, + 0.523 + ], + "angle": 0, + "content": "3D object detection is a fundamental technology for autonomous systems [12, 14, 15, 36, 48, 49], robotics [6, 67, 84], and augmented reality [43, 52]. 3D perception not only enables machines to perceive and interact with the physical world, but also serves as a foundational input for more advanced tasks, such as behavior decision [3, 11, 20, 31], world modeling [22, 23, 38] and 3D scene reconstruction [50, 73, 75]. For practical deployment, a generalizable 3D detector ideally should detect arbitrary objects from easily accessible inputs, such as monocular images, without relying on specific sensor parameters. Such a model would be highly adaptable and reliable for various downstream tasks in diverse and unpredictable environments [15, 36, 43, 84]. 
Also, accurate detection results provided by such a detector (e.g., generating 3D bounding boxes for even images from the internet) make it a versatile tool, paving the way for scalable 3D systems that leverage Internet-scale data and advance toward open-world scenarios [22, 23, 38, 50, 73]." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.524, + 0.483, + 0.689 + ], + "angle": 0, + "content": "Previous research, exemplified by Omni3D [8], has attempted to improve the generalization of the 3D detection system through multi-dataset training [8, 35, 40, 68]. However, despite utilizing large datasets to train a unified detector [8, 40], these approaches provide limited generalization to novel camera configurations and cannot detect unseen object categories beyond predefined label spaces. Therefore, developing a 3D detection foundation model with strong zero-shot generalizability, which is capable of detecting any unseen object under arbitrary camera configurations, remains a crucial and unsolved problem." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.689, + 0.483, + 0.869 + ], + "angle": 0, + "content": "While recent advances in 2D foundation models [33, 44, 51, 56] demonstrate remarkable zero-shot capabilities. Segment Anything Model (SAM) [33] features a promptable inference mechanism, supporting user-friendly prompts like points and boxes to segment user-specified objects. Their impressive generalization ability stems from training on billions of annotated images. However, in 3D object detection, the available labeled data is limited to only millions of samples—typically 3-4 orders of magnitude smaller than in 2D images. Such severe data scarcity [74, 86] poses a fundamental challenge, making it nearly infeasible to train a 3D foundation model from scratch." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.871, + 0.483, + 0.901 + ], + "angle": 0, + "content": "In this work, we present DetAny3D, a promptable 3D detection foundation model designed for generalizable 3D" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.227 + ], + "angle": 0, + "content": "object detection using only monocular images (see Figure 1). Given the inherent scarcity of 3D annotated data, we achieve strong generalization from two critical perspectives: model architecture and data utilization. The central insight of our approach is to leverage the extensive prior knowledge encoded within two broadly pre-trained 2D foundation models—SAM [33] and DINO [10, 51)—thus unlocking effective zero-shot 3D detection capabilities with minimal available 3D data." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.23, + 0.905, + 0.41 + ], + "angle": 0, + "content": "Specifically, we adopt SAM as our promptable backbone, capitalizing on its versatile and robust object understanding capability derived from large-scale 2D data. Concurrently, we utilize DINO [51] depth-pretrained by UniDepth [54], to offer redundant 3D geometric priors [7, 76], which plays a pivotal role for accurate 3D detection in a monocular setting. To integrate the complementary features from SAM and DINO more effectively, we propose the 2D Aggregator, an attention-based mechanism that aligns these features and dynamically optimizes their contributions via learnable gating. 2D Aggregator fully exploits the strengths of each foundation model." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.413, + 0.905, + 0.565 + ], + "angle": 0, + "content": "To further address the challenge of effectively transferring knowledge from 2D to 3D, we introduce the 3D Interpreter. Central to the 3D Interpreter is the Zero-Embedding Mapping (ZEM) mechanism, which ensures stable 2D-to-3D mapping by reducing early-stage interference and preserving pretrained 2D priors. 
By stabilizing the training process across diverse datasets with varying camera parameters, scene complexities, and depth distributions, the ZEM mechanism enables progressive zero-shot 3D grounding capabilities, significantly enhancing model generalization." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.567, + 0.905, + 0.778 + ], + "angle": 0, + "content": "To leverage as much 3D-related data as possible, we aggregate a diverse range of datasets, including 16 datasets spanning depth with intrinsic data and 3D detection data, referred as DA3D. Experimental results, using prompts aligned with the baselines, demonstrate three key advantages: (1) Generalization to novel classes: achieves \\(21.0\\%\\), \\(4.3\\%\\), \\(11.3\\%\\) higher zero-shot \\(\\mathrm{AP}_{3\\mathrm{D}}\\) than baselines on novel categories on KITTI, SUNRGBD, and ARKitScenes. (2) Generalization to novel cameras: improves cross-dataset performance by \\(4.7\\%\\), \\(5.7\\%\\) and \\(1.1\\%\\) \\(\\mathrm{AP}_{3\\mathrm{D}}\\) compared to baseline methods on zero-shot datasets Cityscapes3D, Waymo and 3RScan. (3) Performance on in-domain data: surpasses baseline by \\(1.6\\%\\) \\(\\mathrm{AP}_{3\\mathrm{D}}\\) on Omni3D. Core contributions are summarized in following:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.78, + 0.904, + 0.825 + ], + "angle": 0, + "content": "- We develop DetAny3D, a promptable 3D detection foundation model capable of detecting any 3D object in real-world scenarios with arbitrary monocular inputs." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.826, + 0.905, + 0.886 + ], + "angle": 0, + "content": "- DetAny3D introduces 2D Aggregator to effectively fuse the features from two 2D foundation models SAM and depth-pretrained DINO, which provide pivot shape and 3D geometric priors for various objects, respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.886, + 0.905, + 0.901 + ], + "angle": 0, + "content": "- In 2D-to-3D knowledge transfer, DetAny3D involves" + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.78, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.104, + 0.092, + 0.482, + 0.152 + ], + "angle": 0, + "content": "Zero-Embedding Mapping in 3D Interpreter to achieve stable 2D-to-3D mapping, enabling the model to train robustly across datasets with diverse camera parameters, varying scenes, and different depth distributions." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.153, + 0.482, + 0.228 + ], + "angle": 0, + "content": "- The experimental results demonstrate significant advantages of DetAny3D, particularly in accurately detecting unseen 3D objects with arbitrary camera parameters in the zero-shot setting, showcasing its potential across a wide range of real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.244, + 0.236, + 0.259 + ], + "angle": 0, + "content": "2. Related works" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.269, + 0.283, + 0.285 + ], + "angle": 0, + "content": "2.1. 3D Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.292, + 0.485, + 0.716 + ], + "angle": 0, + "content": "Existing 3D object detection systems have predominantly focused on single-dataset optimization, achieving strong performance on benchmark datasets like KITTI [24] and nuScenes [9] through task-specific architectures [14, 18, 39, 41, 42, 45, 66, 80]. While effective in constrained scenarios, these approaches suffer from significant domain gaps when deployed in new contexts, primarily due to their reliance on limited sensor-specific data and closed-set assumptions. 
Recent works, exemplified by Omni3D [8], have demonstrated the potential of multi-dataset training. Models like Cube R-CNN [8] and UniMODE [40] train a universal monocular 3D detector across multiple datasets, achieving some level of robustness to camera parameters, but are still restricted to predefined classes. V-MIND [32] further addresses the data scarcity challenge by generating pseudo 3D training data from large-scale 2D annotations. Towards more general detection, OV-Uni3DETR [69] pioneers openset detection that is able to detect with multimodal inputs, but it is trained separately for indoor and outdoor domains, thereby limiting its overall generalization. More recently, OVMono3D [74] leverages Grounding DINO's [44] 2D results with a 3D head on unified datasets. However, it does not fully exploit the priors contained in 2D foundation models, leading to performance constraints tied to the limited 3D data. In contrast, our approach fully capitalizes on the knowledge distilled in 2D foundation models while leveraging abundant 3D-related data, thereby enabling the detection of any 3D object from arbitrary monocular inputs." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.727, + 0.33, + 0.743 + ], + "angle": 0, + "content": "2.2. Vision Foundation Models" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Foundation models have demonstrated significant potential across various domains. For example, language foundation models such as GPT-4 [1] and DeepSeek [5, 26], trained on massive internet-scale corpora, have achieved impressive capabilities in natural language processing across diverse fields [1, 5, 60, 63, 81, 82]. Similarly, foundation models in the vision domain have made remarkable strides [29, 33, 37, 44, 51, 56, 79]. 
DINOv2 [51], trained on a vast range of curated data from diverse sources, is capable of producing general-purpose visual features that work" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.908, + 0.425 + ], + "angle": 0, + "content": "seamlessly across different image distributions and tasks. SAM [33] has taken a step further in the vision domain by introducing promptability, enabling models to generalize to novel visual concepts through large-scale data training and continuous model refinement. In recent years, the development of foundation models in the 3D domain has started to take initial steps [13, 28, 55, 78, 83, 85]. Most existing 3D foundation models are often combined with vision-language models (VLMs) [13, 27, 55, 85], relying on point clouds as input to help the language models understand 3D [13, 85]. While these methods are valuable for scene understanding and semantic tasks, they do not directly provide precise 3D detection results. Moreover, point cloud inputs significantly restrict the use cases [72], as they are not always accessible in many practical scenarios. In contrast to these approaches, we aim to develop a foundation model specifically dedicated to 3D detection tasks with the most general inputs, monocular images. By leveraging the powerful priors from 2D vision foundation models, our approach enables the detection of any 3D object with arbitrary camera configurations, presenting a broad range of practical applications." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.441, + 0.804, + 0.459 + ], + "angle": 0, + "content": "3. Detect Anything 3D in the Wild" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.468, + 0.625, + 0.483 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.491, + 0.907, + 0.626 + ], + "angle": 0, + "content": "As illustrated in Figure 2(a), DetAny3D takes a monocular RGB image and prompts (e.g., boxes, points, text, intrinsic) as input. 
The box, point, and text prompts are used to specify objects, while the intrinsic prompts are optional. When not provided, the model predicts the intrinsic parameters and the corresponding 3D detection results. If intrinsic are available, the model can leverage them as geometric constraints to mitigate the ill-posed nature of monocular depth estimation and calibrate its detection results." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.909, + 0.809 + ], + "angle": 0, + "content": "Specifically, the monocular image is embedded in parallel by two foundational models: SAM [33] for low-level pixel information, underpins the entire promptable architecture. And depth-pretrained DINO [51, 54], which provide rich high-level geometric knowledge, excels in depth-related tasks. These complementary 2D features are then fused through our proposed 2D Aggregator (see Figure 2(b)), which hierarchically aligns low-level and high-level information using cross-attention layers. The fused features are subsequently passed to the Depth/Camera Module, which extracts the camera and camera-aware depth embedding, collectively referred to as geometric embedding." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.811, + 0.909, + 0.902 + ], + "angle": 0, + "content": "The geometric embedding and the 3D bounding box tokens with encoded prompt tokens are then fed into the 3D Interpreter (see Figure 2(c)), which employs a structure similar to the SAM decoder along with a specialized Zero-Embedding Mapping (ZEM) mechanism. 
3D Interpreter injects 3D geometric features while ensuring stable 2D-to" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.092, + 0.892, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.251, + 0.892, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.384, + 0.908, + 0.468 + ], + "angle": 0, + "content": "Figure 2. Overview of DetAny3D. It supports arbitrary monocular images as input and performs 3D object detection driven by prompts—box, point, and text to specify target objects and optional camera calibration to calibrate geometric projections. DetAny3D comprises two key modules: (b) 2D Aggregator, which employs a hierarchical cross-attention mechanism to dynamically fuse knowledge from SAM and DINO, with a learnable gate controlling each component's contribution to the geometric embedding; (c) 3D Interpreter, which introduces a Zero-Embedding Mapping (ZEM) strategy based on zero-initialized layers to gradually inject geometric priors, thereby enables zero-shot 3D grounding and avoids catastrophic forgetting during knowledge transfer." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.484, + 0.484, + 0.56 + ], + "angle": 0, + "content": "3D knowledge transfer, enabling progressive 3D grounding across diverse data domains. Finally, the model predicts 3D boxes based on the hidden states of the 3D box tokens. Our DetAny3D is trained on selected seen classes and can detect any unseen classes in a zero-shot manner." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.573, + 0.242, + 0.589 + ], + "angle": 0, + "content": "3.2.2D Aggregator" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.596, + 0.483, + 0.702 + ], + "angle": 0, + "content": "To effectively fuse multiple foundation models, we propose 2D Aggregator to aggregate features from SAM and DINO, mitigating potential conflicts between their heterogeneous representations. As illustrated in Figure 2(b), the 2D Aggregator fuses features from SAM and DINO in a hierarchical manner, progressively integrating spatial and geometric information across four cascaded alignment units." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.703, + 0.483, + 0.854 + ], + "angle": 0, + "content": "Feature Extraction. Given an input image, the SAM encoder extracts high-resolution spatial features \\(\\mathbf{F}_s\\in\\) \\(\\mathbb{R}^{H_s\\times W_s\\times C}\\), capturing fine-grained details and boundaries. Simultaneously, the DINO encoder outputs geometry-aware embeddings \\(\\mathbf{F}_d\\in \\mathbb{R}^{H_d\\times W_d\\times C}\\), which is depth-pretrained by UniDepth [54] and provides robust priors for depth and intrinsics. Following the design of ViT Adapter [16], we also employ a convolutional structure to produce preliminary image features, denoted as \\(\\mathbf{F}_q^0\\), serving as the initial query for subsequent attention-based fusion." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Hierarchical Fusion. Each of the four alignment units fuses SAM and DINO features via cross-attention. 
In the \\(i\\)-th unit, we first apply learnable gating weights \\(\\alpha_{i}\\) (initial" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.483, + 0.905, + 0.515 + ], + "angle": 0, + "content": "ized to 0.5) to combine the \\(i\\)-th block of SAM features \\(\\mathbf{F}_s^i\\) and DINO features \\(\\mathbf{F}_d^i\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.598, + 0.524, + 0.905, + 0.542 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {\\text {f u s e d}} ^ {i} = \\alpha_ {i} \\cdot \\mathbf {F} _ {s} ^ {i} + (1 - \\alpha_ {i}) \\cdot \\mathbf {F} _ {d} ^ {i}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.55, + 0.905, + 0.582 + ], + "angle": 0, + "content": "We use \\(\\mathbf{F}_{\\mathrm{fused}}^{i}\\) as key and value, while the query feature \\(\\mathbf{F}_q^{i - 1}\\) acts as the query in the cross-attention mechanism:" + }, + { + "type": "equation", + "bbox": [ + 0.571, + 0.591, + 0.905, + 0.611 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {q} ^ {i} = \\operatorname {C r o s s A t t n} \\left(\\mathbf {F} _ {q} ^ {i - 1}, \\mathbf {F} _ {\\text {f u s e d}} ^ {i}, \\mathbf {F} _ {\\text {f u s e d}} ^ {i}\\right), \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.61, + 0.621, + 0.905, + 0.641 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {F}} _ {\\text {f u s e d}} ^ {i} = \\operatorname {N o r m} \\left(\\mathbf {F} _ {\\text {f u s e d}} ^ {i} + \\mathbf {F} _ {q} ^ {i}\\right). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.644, + 0.907, + 0.704 + ], + "angle": 0, + "content": "This design enables the model to dynamically emphasize SAM's spatial details or DINO's semantic and geometric cues at different hierarchy levels while minimizing interference between the two representations." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.704, + 0.907, + 0.84 + ], + "angle": 0, + "content": "Geometric Embeddings. 
The fused features \\(\\hat{\\mathbf{F}}_{\\mathrm{fused}}^i\\), \\(i \\in [1,2,3,4]\\), are subsequently processed by the depth and camera modules, following the UniDepth [54] architecture. Specifically, these modules predict the camera embedding \\(\\mathbf{C}\\) and camera-aware depth embedding \\(\\mathbf{D}|\\mathbf{C}\\), referred as the geometric embedding \\(\\mathbf{G} = \\{\\mathbf{D}|\\mathbf{C}, \\mathbf{C}\\}\\). These modules provide aligned depth and camera parameters under the monocular depth ill-posed problem. Further details can be found in the Supplementary material Section 7.1." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.841, + 0.906, + 0.901 + ], + "angle": 0, + "content": "Overall, by progressively aligning multi-scale features and adaptively integrating their contributions, 2D Aggregator effectively leverages the strengths of both foundation models while minimizing potential conflicts." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.241, + 0.108 + ], + "angle": 0, + "content": "3.3. 3D Interpreter" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.113, + 0.484, + 0.28 + ], + "angle": 0, + "content": "The diverse 3D object supervisions across various scenarios, depths, and camera intrinsics introduce challenges to model training. Our 3D Interpreter aims to progressively integrate geometric information while ensuring stable 2D-to-3D knowledge transfer. We introduce Zero-Embedding Mapping (ZEM) mechanism, which incrementally infuses 3D geometry into the decoder via zero-initialized layers—without disrupting the original 2D features. As Figure 2(c) shows, the 3D Interpreter comprises three main components: the Two-Way Transformer, the Geometric Transformer, and the 3D bounding box heads." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.281, + 0.484, + 0.327 + ], + "angle": 0, + "content": "Two-Way Transformer. Following the SAM design, we first concatenate the 3D bounding box tokens with prompt-related tokens to form the query:" + }, + { + "type": "equation", + "bbox": [ + 0.138, + 0.338, + 0.483, + 0.365 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} = \\left[ \\left[ \\mathbf {T} _ {\\mathrm {3 D}, 1}; \\mathbf {T} _ {\\mathrm {p}, 1} \\right], \\dots , \\left[ \\mathbf {T} _ {\\mathrm {3 D}, N}; \\mathbf {T} _ {\\mathrm {p}, N} \\right] \\right], \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.376, + 0.484, + 0.452 + ], + "angle": 0, + "content": "where \\(\\mathbf{T}_{3\\mathrm{D},i}\\) denotes the 3D bounding box token for the \\(i\\)-th object, \\(\\mathbf{T}_{\\mathrm{p},i}\\) is the prompt-related token, and \\([\\cdot ;\\cdot ]\\) denotes vector concatenation. The SAM encoder output \\(\\mathbf{F}_s\\) serves as both key and value for the first Two-Way Transformer layer, yielding:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.465, + 0.483, + 0.482 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {s} ^ {\\prime} = \\text {T w o W a y T r a n s} (\\mathbf {Q}, \\mathbf {F} _ {s}, \\mathbf {F} _ {s}). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.495, + 0.483, + 0.525 + ], + "angle": 0, + "content": "The initialized parameters of two-way transformer are copied using pre-trained SAM decoder." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.526, + 0.484, + 0.587 + ], + "angle": 0, + "content": "Geometric Transformer. 
We then process the geometric embedding \\(\\mathbf{G}\\) (from the 2D Aggregator) through the zero-initialized \\(1 \\times 1\\) convolutional layer ZEM and add it to \\(\\mathbf{F}_s\\) for use as key and value in the Geometric Transformer:" + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.599, + 0.483, + 0.617 + ], + "angle": 0, + "content": "\\[\n\\mathbf {G} ^ {\\prime} = \\operatorname {G e o T r a n s} (\\mathbf {Q}, \\operatorname {Z E M} (\\mathbf {G}) + \\mathbf {F} _ {s}, \\operatorname {Z E M} (\\mathbf {G}) + \\mathbf {F} _ {s}). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.63, + 0.484, + 0.705 + ], + "angle": 0, + "content": "ZEM integrates the geometric embedding and avoids catastrophic forgetting in 2D features. Next, \\(\\mathbf{G}'\\) is again passed through ZEM and combined with \\(\\mathbf{F}_s'\\). This enriched representation is used as key and value in the second Two-Way Transformer layer to generate object features \\(\\mathbf{O}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.718, + 0.484, + 0.748 + ], + "angle": 0, + "content": "\\[\n\\mathbf {O} = \\text {T w o W a y T r a n s} \\left(\\mathbf {Q} ^ {\\prime}, \\operatorname {Z E M} \\left(\\mathbf {G} ^ {\\prime}\\right) + \\mathbf {F} _ {s} ^ {\\prime}, \\operatorname {Z E M} \\left(\\mathbf {G} ^ {\\prime}\\right) + \\mathbf {F} _ {s} ^ {\\prime}\\right). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.483, + 0.795 + ], + "angle": 0, + "content": "ZEM also helps stabilize parameter updates in the two-way and geometric transformer training, preventing conflicts arising from diverse 3D object supervision." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.796, + 0.484, + 0.901 + ], + "angle": 0, + "content": "3D Bounding Box Heads. 
Finally, \\(\\mathbf{O}\\) is fed into the 3D bounding box heads to calculate the final predictions, which follows typical architectures from standard 3D detection frameworks [8, 66, 80]: \\(B_{\\mathrm{3D}}(x,y,z,w,h,l,R,S)\\) where \\(x,y,z\\) specify the 3D box center, \\(w,h,l\\) are its dimensions, \\(R\\) is the rotation matrix, and \\(S\\) is the predicted 3D Intersection over Union (IoU) score." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.587, + 0.105 + ], + "angle": 0, + "content": "3.4. Loss" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.113, + 0.907, + 0.326 + ], + "angle": 0, + "content": "Our loss function comprises three components, the depth loss \\(\\mathcal{L}_{\\mathrm{depth}}\\), the camera intrinsic loss \\(\\mathcal{L}_{\\mathrm{cam}}\\), and the detection loss \\(\\mathcal{L}_{\\mathrm{det}}\\). The overall loss is defined as the sum of these three components. For depth loss \\(\\mathcal{L}_{\\mathrm{depth}}\\), we adopt the commonly used SILog loss [19, 64] to supervise depth prediction. For camera intrinsic loss \\(\\mathcal{L}_{\\mathrm{cam}}\\), we follow the dense camera ray approach [30, 54] to represent intrinsics and also employ the SILog loss to measure deviations between predicted and ground-truth parameters. At last, for detection loss \\(\\mathcal{L}_{\\mathrm{det}}\\), we use the smooth L1 loss [40, 66, 80] to regress 3D bounding boxes parameters and predicted IOU scores and the Chamfer loss [8, 74] for rotation matrices. Detailed formulations of these loss functions can be found in the supplementary material Section 7.3." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.336, + 0.7, + 0.352 + ], + "angle": 0, + "content": "3.5. Prompt Interaction" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.359, + 0.906, + 0.42 + ], + "angle": 0, + "content": "DetAny3D supports point, box, and text prompts to detect 3D box for user-specified objects. 
To calibrate more precise depth for specific camera, DetAny3D allows users to specify the camera configuration via the intrinsic prompt." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.42, + 0.905, + 0.51 + ], + "angle": 0, + "content": "Box and Point Prompts. Following SAM's methodology, both box and point prompts are encoded based on their respective positions and embeddings. For the box prompt, two points (top-left and bottom-right corners) are used. The point prompt is derived by combining the positional encoding of the point and the corresponding embedding." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.511, + 0.906, + 0.6 + ], + "angle": 0, + "content": "Text Prompts. Recent 2D foundation models like Grounding DINO [44] are able to detect bounding box for the open-vocabulary object specified by users using text prompt. DetAny3D can further generate 3D bounding box using the prediction of Grounding DINO, which enables text as prompts in the zero-shot interface." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.602, + 0.906, + 0.678 + ], + "angle": 0, + "content": "Intrinsic Prompts. Unlike most existing 3D detectors that employ a fixed virtual camera and rely on GT intrinsics to recover the true depth, inspired by Unidepth, we predict intrinsics for camera-aware 3D detection. 
When no intrinsic prompt is given, the model infers intrinsics for outputs:" + }, + { + "type": "equation", + "bbox": [ + 0.566, + 0.69, + 0.905, + 0.709 + ], + "angle": 0, + "content": "\\[\n\\operatorname {B o x} _ {3 D} = 3 \\text {D I n t e r p r e t o r} (\\mathbf {Q}, \\hat {\\mathbf {G}}, \\mathbf {F} _ {s}), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.721, + 0.906, + 0.797 + ], + "angle": 0, + "content": "where \\(\\hat{\\mathbf{G}} = \\{\\mathbf{D}|\\hat{\\mathbf{C}},\\hat{\\mathbf{C}}\\}\\), \\(\\hat{\\mathbf{C}}\\) is the predicted camera embedding, and \\(\\mathbf{D}|\\hat{\\mathbf{C}}\\) is the depth embedding conditioned on the predicted camera embedding. When intrinsic prompts are given, the model refines the 3D detection results based on the true intrinsic:" + }, + { + "type": "equation", + "bbox": [ + 0.566, + 0.811, + 0.905, + 0.829 + ], + "angle": 0, + "content": "\\[\n\\operatorname {B o x} _ {3 D} = 3 \\mathrm {D I n t e r p r e t o r} (\\mathbf {Q}, \\mathbf {G}, \\mathbf {F} _ {s}), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.906, + 0.902 + ], + "angle": 0, + "content": "where \\(\\mathbf{G} = \\{\\mathbf{D}|\\mathbf{C},\\mathbf{C}\\}\\). This boosts performance on both intrinsic prediction and 3D detection since the model continuously predicts and aligns the intrinsic with the 3D detection rather than estimating it solely from input image." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.091, + 0.216, + 0.108 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.116, + 0.281, + 0.133 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.14, + 0.486, + 0.487 + ], + "angle": 0, + "content": "DA3D Benchmark. 
We present DA3D, a unified 3D detection dataset that aggregates 16 diverse datasets for 3D detection and depth estimation. Building upon Omni3D's original datasets (Hypersim [57], ARKitScenes [4], Objectron [2], SUNRGBD [61], KITTI [24], and nuScenes [9]), we incorporate additional four outdoor detection datasets (Argoverse2 [70], A2D2 [25], Waymo [62], Cityscapes3D [21]), one indoor detection dataset (3RScan [65]), and five depth and intrinsic datasets (Scannet [17], Taskonomy [77], DrivingStereo [71], Middlebury [59], IBIMS-1 [34]). All data is standardized with monocular images, camera intrinsics, 3D bounding boxes, and depth maps. Following prior work [74], we select partial categories from KITTI, SUNRGBD, and ARKitScenes as zero-shot test classes. We select Cityscapes3D, Waymo, and 3RScan as our zero-shot datasets with novel camera configurations, where 3RScan also contains novel object categories. Depth supervision from LiDAR, RGB-D, and stereo sensors enhances \\(75\\%\\) of training samples, while intrinsic parameters cover 20 camera configurations across 0.4 million frames \\((2.5\\times\\) Omni3D's scale). Dataset statistics and splits are detailed in Supplementary material Section 6. All data are subject to their respective licenses." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.489, + 0.485, + 0.626 + ], + "angle": 0, + "content": "Baselines. We choose Cube R-CNN [8] and OV-Mono3D [74] as our primary baselines, as their settings align most closely with our experimental protocol: Cube R-CNN is a benchmark provided by the Omni3D dataset. It is a unified detector capable of performing detection on predefined categories. OVMono3D is a recently available open-vocabulary 3D detector on the Omni3D dataset. It lifts 2D detection to 3D by connecting the open-vocabulary 2D detector Grounding DINO [44] with a detection head." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.628, + 0.486, + 0.824 + ], + "angle": 0, + "content": "Metrics. 
We adopt the metrics in the Omni3D benchmark [8], which is Average Precision (AP). Predictions are matched to ground-truth by measuring their overlap using IoU3D, which computes the intersection-over-union (IoU) of 3D cuboids. The IoU3D thresholds range from \\(\\tau \\in [0.05, 0.10, \\dots, 0.50]\\). For experiments using text prompts, we additionally employ target-aware metrics from OVMono3D [74]: Prompt the detector only with category names present in the per-image annotations instead of providing an exhaustive category list. This addresses severe naming ambiguity (e.g., \"trash can\" vs. \"rubbish bin\") and missing annotation issues prevalent in indoor datasets like 3RScan (see Supplementary material Section 8.)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.826, + 0.485, + 0.903 + ], + "angle": 0, + "content": "Implementation Details. We implement DetAny3D via PyTorch [53]. We use the pretrained ViT-L DINOv2 [51, 54] and ViT-H SAM [33] as our initial models, with SAM serving as the promptable backbone, where the encoder is frozen during training. All main experiments are conducted" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.908, + 0.26 + ], + "angle": 0, + "content": "using 8 NVIDIA A100 machines with 8 GPUs for each and a batch size of 64. The model is trained for 80 epochs, taking approximately 2 weeks to complete. The training uses the AdamW [47] optimizer with an initial learning rate of 0.0001, adjusted according to the cosine annealing policy [46]. During box prompt training, we apply a 0.1 positional offset disturbance. For point prompt training, points are randomly selected from the mask. Text prompts are converted into box prompts via Grounding DINO SwinT [44]. For fair comparisons, all baseline-related experiments incorporate intrinsic prompts and use aligned prompt inputs." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.272, + 0.653, + 0.287 + ], + "angle": 0, + "content": "4.2. 
Main Results" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.295, + 0.909, + 0.568 + ], + "angle": 0, + "content": "Zero-shot Category Performance. In this experiment, we use two sources for the prompt input: text prompt processed by Grounding DINO and box prompt from ground-truth 2D bounding box. We evaluate our model on KITTI, SUNRGBD, and ARKitScenes datasets with the same zero-shot categories as OVMono3D [74]. As shown in Table 1 (left), our DetAny3D demonstrates superior zero-shot adaptation performance compared to the OVMono3D baseline. When using Grounding DINO for text prompt input, our method achieves significant improvements of \\(21.02\\mathrm{AP}_{3\\mathrm{D}}\\) on KITTI, \\(4.29\\mathrm{AP}_{3\\mathrm{D}}\\) on SUNRGBD, and \\(11.35\\mathrm{AP}_{3\\mathrm{D}}\\) on ARKitScenes under the target-aware metric. When using 2D ground-truth as box prompt input, DetAny3D attains \\(28.96\\mathrm{AP}_{3\\mathrm{D}}\\) on KITTI, \\(39.09\\mathrm{AP}_{3\\mathrm{D}}\\) on SUNRGBD, and \\(57.72\\mathrm{AP}_{3\\mathrm{D}}\\) on ARKitScenes, showing \\(3.4\\times\\), \\(2.3\\times\\), and \\(4.1\\times\\) gains over the baseline, respectively. This substantial performance gap highlights our method's enhanced ability to generalize to novel object categories." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.57, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Zero-shot Camera Performance. To assess robustness against novel camera parameters, we conduct cross-dataset evaluation as shown in Table 1 (right). For Cityscapes3D and Waymo, We use Cube R-CNN's 2D detections and ground-truth as box prompt and Grounding DINO processed text prompt for comparison. For 3RScan, due to namespace inconsistency with Cube R-CNN's predefined categories and the presence of novel classes, we only use text prompt and ground-truth box prompts, benchmarking against OVMono3D. DetAny3D exhibits strong adaptation to unseen camera configurations. 
When using Cube R-CNN-aligned prompts, our model achieves \\(\\mathrm{AP}_{3\\mathrm{D}}\\) scores of 10.33 and 15.17 on Cityscapes3D and Waymo, respectively, surpassing Cube R-CNN by +2.11 and +5.74. With text prompts, under identical settings as OVMono3D [74], our method improves \\(\\mathrm{AP}_{3\\mathrm{D}}\\) by +4.73 on Cityscapes3D, +5.68 on Waymo, and +1.1 on 3RScan under target-aware metrics. Both models show low scores on conventional metrics for 3RScan due to severe naming ambiguity and missing annotations. Using 2D ground-truth as box prompts, DetAny3D attains \\(\\mathrm{AP}_{3\\mathrm{D}}\\) of 16.88, 15.83, and 21.36 across the three datasets, outperforming OVMono3D by +6.82, +5.6," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.089, + 0.907, + 0.132 + ], + "angle": 0, + "content": "Table 1. Zero-shot 3D detection performance comparison on novel categories (left) and novel cameras (right). Results report \\(\\mathrm{AP}_{\\mathrm{3D}}\\) with different prompt strategies: (1) Cube R-CNN, (2) Grounding DINO outputs (traditional metric / target-aware metric) and (3) Ground Truth. Target-aware metric uses per-image existing categories for prompting." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.133, + 0.904, + 0.336 + ], + "angle": 0, + "content": "
PromptMethodNovel CategoriesNovel Cameras
APkit3DAPsun3DAPpark3DAPcity3DAPwym3DAP3rs3D
-Cube R-CNN [8]---8.229.43-
Cube R-CNNOVMono3D [74]---4.9710.89-
DetAny3D (ours)---10.3315.17-
Δ---+5.36+4.28-
Grounding DINOOVMono3D [74]4.71 / 4.714.07 / 16.7813.21 / 13.215.88 / 10.989.20 / 10.270.37 / 8.48
DetAny3D (ours)25.73 / 25.737.63 / 21.0724.56 / 24.5611.05 / 15.7115.38 / 15.950.65 / 9.58
Δ+21.02 / +21.02+3.56 / +4.29+11.35 / +11.35+5.17 / +4.73+6.18 / +5.68+0.28 / +1.10
Ground TruthOVMono3D [74]8.4417.1614.1210.0610.2318.05
DetAny3D (ours)28.9639.0957.7216.8815.8321.36
Δ+20.52+21.93+43.60+6.82+5.60+3.31
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.34, + 0.907, + 0.381 + ], + "angle": 0, + "content": "Table 2. In-domain performance comparison between DetAny3D and baselines. The first three columns show results trained only on NuScenes and KITTI, while the next seven columns show results trained on the unified dataset. Two prompt sources are used: (1) Cube R-CNN 2D detections, (2) Ground Truth." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.383, + 0.905, + 0.535 + ], + "angle": 0, + "content": "
MethodOmni3D_OUTOMni3D
APkit3D↑APnus3D↑APout3D↑APkit3D↑APnus3D↑APsun3D↑APark3D↑APobj3D↑APhyp3D↑AP3D↑
ImVoxelNet [58]23.523.421.5------9.4
SMOKE [45]25.920.420.0------10.4
OV-Uni3DETR [68]35.133.031.6-------
Cube R-CNN [8]36.032.731.932.5030.0615.3341.7350.847.4823.26
OVMono3D [74]w/Cube RCNN---25.4524.3315.2041.6058.877.7522.98
DetAny3D (ours)w/Cube RCNN35.833.932.231.6130.9718.9646.1354.427.1724.92
OVMono3D [74]w/Ground Truth---33.6923.7927.8340.8556.6411.9925.32
DetAny3D (ours)w/Ground Truth38.036.735.938.6837.5546.1450.6256.8215.9834.38
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.549, + 0.483, + 0.594 + ], + "angle": 0, + "content": "and \\(+3.31\\), respectively. These results highlight the effectiveness of our architecture and its potential for real-world applications with arbitrary camera configurations." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.595, + 0.483, + 0.746 + ], + "angle": 0, + "content": "In-domain Performance We also evaluate our model's in-domain detection capability using two prompt sources: 2D detections from Cube R-CNN and 2D ground-truth. Besides the unified model, we also train DetAny3D on Omni3D_out for comparison. As shown in Table 2, DetAny3D achieves competitive results with Cube R-CNN when provided with aligned input. Using GT prompts, DetAny3D outperforms OVMono3D by \\(9.06\\mathrm{AP}_{3\\mathrm{D}}\\), indicating that Cube R-CNN may bottleneck performance, and stronger 2D prompts could further boost results." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.757, + 0.395, + 0.773 + ], + "angle": 0, + "content": "4.3. Possible Applications of DetAny3D" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.779, + 0.483, + 0.84 + ], + "angle": 0, + "content": "Other than robustly detecting diverse corner cases in real-world tasks such as autonomous driving and embodied perception, DetAny3D's open-world detection results can further serve as inputs for advanced downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.484, + 0.902 + ], + "angle": 0, + "content": "3D Bounding Box Guided Video Generation. We feed DetAny3D outputs into Sora for zero-shot, open-world 3D box guided video generation. As shown in Figure 3, we compare: (i) image + 3D box + text, (ii) image + 2D box +" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.553, + 0.907, + 0.791 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.798, + 0.907, + 0.882 + ], + "angle": 0, + "content": "Figure 3. 
Zero-Shot Transfer Video Generation via Sora. We provide Sora with Internet-sourced images. As shown, when controlled with 3D bounding box, Sora can better capture the scene's geometric relationships. In contrast, with only controlled by 2D bounding box prompt, Sora respects pixel-level spatial cues but fails to generate accurate geometric offset." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.109, + 0.089, + 0.357, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.09, + 0.621, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.091, + 0.888, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.108, + 0.228, + 0.888, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.377, + 0.908, + 0.42 + ], + "angle": 0, + "content": "Figure 4. Qualitative Results. We present qualitative examples from open-world detection. In each pair of images, the top row is produced by OVMono3D, and the bottom row by DetAny3D. For each example, the left sub-figure overlays the projected 3D bounding boxes, while the right sub-figure shows the corresponding bird's-eye view with \\(1\\mathrm{m} \\times 1\\mathrm{m}\\) grids as the background." + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.436, + 0.483, + 0.493 + ], + "angle": 0, + "content": "Table 3. Ablation study of DetAny3D. The table shows the impact of different design choices on \\(\\mathrm{AP}_{\\mathrm{3D}}\\) performance. Each component is progressively added. To save resources, ablations are conducted on \\(10\\%\\) of the full training dataset." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.503, + 0.484, + 0.598 + ], + "angle": 0, + "content": "
Depth&Cam.Merge DINO2D Agg.ZEMAP3D ↑
----5.81
---10.10
--20.20
-23.21
25.80
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.608, + 0.483, + 0.639 + ], + "angle": 0, + "content": "text, and (iii) image + text. With 3D box constraints, Sora generates videos better aligned with intent." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.649, + 0.255, + 0.663 + ], + "angle": 0, + "content": "4.4. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.671, + 0.483, + 0.761 + ], + "angle": 0, + "content": "As shown in Table 3, we ablate key components of DetAny3D, showing the evolution from a SAM-based baseline to DetAny3D with strong 3D generalization. The base model extends SAM with 3D box tokens and a 3D head for direct box prediction. Additional ablations, including backbone and prompt types, are in Supplementary Section 9." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.765, + 0.483, + 0.854 + ], + "angle": 0, + "content": "- Effectiveness of Depth & Camera Modules. Depth map provides denser supervision, while camera configuration intrinsic help mitigate disruptions caused by multiple datasets training. Integrating both depth map and camera intrinsic yields improvement in 3D feature extraction and generalization across diverse datasets." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.856, + 0.484, + 0.901 + ], + "angle": 0, + "content": "- Effectiveness of Merging Depth-Pretrained DINO. Incorporating depth-pretrained DINO yields remarkable improvements, demonstrating that the rich geometric in" + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.765, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.438, + 0.907, + 0.468 + ], + "angle": 0, + "content": "formation from DINO effectively compensates for SAM's limited geometric understanding." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.469, + 0.907, + 0.543 + ], + "angle": 0, + "content": "- Effectiveness of 2D Aggregator. 
Compared to directly adding the features from two models, the 2D Aggregator reduces conflicts between different foundation models, further unleashing the performance gains from two foundation model integration." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.544, + 0.906, + 0.619 + ], + "angle": 0, + "content": "- Effectiveness of ZEM. ZEM mechanism integrates geometric features through zero-initialized layers, which enables stable 2D-to-3D knowledge transfer during training across datasets with diverse camera parameters, scenes, and depth distributions." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.469, + 0.907, + 0.619 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.629, + 0.696, + 0.644 + ], + "angle": 0, + "content": "4.5. Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.651, + 0.907, + 0.711 + ], + "angle": 0, + "content": "We provide qualitative comparisons with OVMono3D. As shown in Figure 4, our model predicts more accurate intrinsics when the camera parameters are unknown and infers more consistent camera poses and 3D detections." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.725, + 0.64, + 0.74 + ], + "angle": 0, + "content": "5. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.907, + 0.901 + ], + "angle": 0, + "content": "We propose DetAny3D, a promptable 3D detection foundation model that can detect arbitrary 3D objects from any monocular image input. DetAny3D exhibits significant zero-shot detection capabilities across diverse domains and effective zero-shot transfer across various tasks, highlighting its suitability for real-world deployment in dynamic and unstructured environments. Moreover, its flexible and robust detection ability opens the door to gathering large-scale, multi-source data for more 3D perception-guided tasks, paving the way toward open-world systems." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.259, + 0.108 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.116, + 0.486, + 0.299 + ], + "angle": 0, + "content": "We sincerely thank Jiazhi Yang, Tianyu Li, Haochen Tian, Jisong Cai, and Li Chen for their invaluable discussions and constructive feedback throughout this project. Their insights and expertise have contributed significantly to the success of this work. We also appreciate the continuous support and encouragement from all the members of OpenDriveLab. This work is supported by the National Key Research and Development Program of China (2024YFE0210700), the National Natural Science Foundation of China (NSFC) under Grants 62206172 and 62432008, and the Shanghai Artificial Intelligence Laboratory. It is also partially funded by Meituan Inc." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.315, + 0.188, + 0.332 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.34, + 0.482, + 0.409 + ], + "angle": 0, + "content": "[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.412, + 0.483, + 0.468 + ], + "angle": 0, + "content": "[2] Adel Ahmadyan, Liangkai Zhang, Artsiom Ablavatski, Jianing Wei, and Matthias Grundmann. Objectron: A large scale dataset of object-centric videos in the wild with pose annotations. In CVPR, 2021. 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.47, + 0.483, + 0.511 + ], + "angle": 0, + "content": "[3] Umar Asif, Jianbin Tang, and Stefan Harrer. 
Graspnet: An efficient convolutional neural network for real-time grasp detection for low-powered devices. In IJCAI, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.515, + 0.482, + 0.583 + ], + "angle": 0, + "content": "[4] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Yuri Feigin, Peter Fu, Thomas Gebauer, Daniel Kurz, Tal Dimry, Brandon Joffe, Arik Schwartz, et al. Arkitsscenes: A diverse real-world dataset for 3d indoor scene understanding using mobile rgb-d data. In NeurIPS Datasets, 2021. 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.586, + 0.482, + 0.655 + ], + "angle": 0, + "content": "[5] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.657, + 0.482, + 0.699 + ], + "angle": 0, + "content": "[6] Georg Biegelbauer and Markus Vincze. Efficient 3d object detection by fitting superquadrics to range image data for robot's object manipulation. In ICRA, 2007. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.702, + 0.482, + 0.77 + ], + "angle": 0, + "content": "[7] Aleksei Bochkovskii, Amaël Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv preprint arXiv:2410.02073, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.774, + 0.482, + 0.829 + ], + "angle": 0, + "content": "[8] Garrick Brazil, Abhinav Kumar, Julian Straub, Nikhila Ravi, Justin Johnson, and Georgia Gkioxari. Omni3d: A large benchmark and model for 3d object detection in the wild. In CVPR, 2023. 
2, 3, 5, 6, 7, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.832, + 0.482, + 0.899 + ], + "angle": 0, + "content": "[9] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In CVPR, 2020. 3, 6, 13" + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.34, + 0.483, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.147 + ], + "angle": 0, + "content": "[10] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In ICCV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.151, + 0.905, + 0.19 + ], + "angle": 0, + "content": "[11] Sergio Casas, Abbas Sadat, and Raquel Urtasun. Mp3: A unified model to map, perceive, predict and plan. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.194, + 0.905, + 0.235 + ], + "angle": 0, + "content": "[12] Li Chen, Penghao Wu, Kashyap Chitta, Bernhard Jaeger, Andreas Geiger, and Hongyang Li. End-to-end autonomous driving: Challenges and frontiers. IEEE TPAMI, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.238, + 0.905, + 0.293 + ], + "angle": 0, + "content": "[13] Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In CVPR, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.296, + 0.905, + 0.337 + ], + "angle": 0, + "content": "[14] Xiaozhi Chen, Kaustav Kundu, Ziyu Zhang, Huimin Ma, Sanja Fidler, and Raquel Urtasun. Monocular 3d object detection for autonomous driving. In CVPR, 2016. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.34, + 0.905, + 0.38 + ], + "angle": 0, + "content": "[15] Xiaozhi Chen, Huimin Ma, Ji Wan, Bo Li, and Tian Xia. Multi-view 3d object detection network for autonomous driving. In CVPR, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.383, + 0.905, + 0.424 + ], + "angle": 0, + "content": "[16] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. In ICLR, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.427, + 0.905, + 0.481 + ], + "angle": 0, + "content": "[17] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In CVPR, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.484, + 0.905, + 0.525 + ], + "angle": 0, + "content": "[18] Saumitro Dasgupta, Kuan Fang, Kevin Chen, and Silvio Savarese. Delay: Robust spatial layout estimation for cluttered indoor scenes. In CVPR, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.528, + 0.905, + 0.569 + ], + "angle": 0, + "content": "[19] David Eigen, Christian Puhrsch, and Rob Fergus. Depth map prediction from a single image using a multi-scale deep network. In NeurIPS, 2014. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.572, + 0.905, + 0.613 + ], + "angle": 0, + "content": "[20] Hao-Shu Fang, Chenxi Wang, Minghao Gou, and Cewu Lu. Graspnet-1billion: A large-scale benchmark for general object grasping. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.616, + 0.905, + 0.669 + ], + "angle": 0, + "content": "[21] Nils Gählert, Nicolas Jourdan, Marius Cordts, Uwe Franke, and Joachim Denzler. Cityscapes 3d: Dataset and benchmark for 9 dof vehicle detection. arXiv preprint arXiv:2006.07864, 2020. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.673, + 0.905, + 0.727 + ], + "angle": 0, + "content": "[22] Ruiyuan Gao, Kai Chen, Enze Xie, HONG Lanqing, Zhenguo Li, Dit-Yan Yeung, and Qiang Xu. Magicdrive: Street view generation with diverse 3d geometry control. In ICLR, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.731, + 0.905, + 0.785 + ], + "angle": 0, + "content": "[23] Ruiyuan Gao, Kai Chen, Zhihao Li, Lanqing Hong, Zhenguo Li, and Qiang Xu. Magicdrive3d: Controllable 3d generation for any-view rendering in street scenes. arXiv preprint arXiv:2405.14475, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.788, + 0.905, + 0.829 + ], + "angle": 0, + "content": "[24] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. *IJRR*, 2013. 3, 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.832, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[25] Jakob Geyer, Yohannes Kassahun, Mentor Mahmudi, Xavier Ricou, Rupesh Durgesh, Andrew S Chung, Lorenz Hauswald, Viet Hoang Pham, Maximilian Mühlegg, Sebastian Dorn, et al. A2d2: Audi autonomous driving dataset. arXiv preprint arXiv:2004.06320, 2020. 6" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.161 + ], + "angle": 0, + "content": "[26] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.163, + 0.485, + 0.245 + ], + "angle": 0, + "content": "[27] Ziyu Guo*, Renrui Zhang*, Xiangyang Zhu, Yiwen Tang, Xianzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.248, + 0.485, + 0.316 + ], + "angle": 0, + "content": "[28] Ziyu Guo*, Renrui Zhang*#, Xiangyang Zhu, Chengzhuo Tong, Peng Gao, Chunyuan Li, and Pheng-Ann Heng. Sam2point: Segment any 3d as videos in zero-shot and promptable manners. arXiv preprint arXiv:2408.16768, 2024.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.319, + 0.484, + 0.387 + ], + "angle": 0, + "content": "[29] Ziyu Guo, Renrui Zhang, Chengzhuo Tong, Zhizheng Zhao, Peng Gao, Hongsheng Li, and Pheng-Ann Heng. Can we generate images with cot? let's verify and reinforce image generation step by step. arXiv preprint arXiv:2501.13926, 2025.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.389, + 0.484, + 0.445 + ], + "angle": 0, + "content": "[30] Xiankang He, Guangkai Xu, Bo Zhang, Hao Chen, Ying Cui, and Dongyan Guo. Diffcalib: Reformulating monocular camera calibration as diffusion-based dense incident map generation. arXiv preprint arXiv: 2405.15619, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.446, + 0.484, + 0.5 + ], + "angle": 0, + "content": "[31] Yihan Hu, Jiazhi Yang, Li Chen, Keyu Li, Chonghao Sima, Xizhou Zhu, Siqi Chai, Senyao Du, Tianwei Lin, Wenhai Wang, et al. Planning-oriented autonomous driving. In CVPR, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.502, + 0.484, + 0.557 + ], + "angle": 0, + "content": "[32] Jin-Cheng Jhang, Tao Tu, Fu-En Wang, Ke Zhang, Min Sun, and Cheng-Hao Kuo. 
V-mind: Building versatile monocular indoor 3d detector with diverse 2d annotations. In WACV, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.56, + 0.484, + 0.615 + ], + "angle": 0, + "content": "[33] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In ICCV, 2023. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.617, + 0.484, + 0.657 + ], + "angle": 0, + "content": "[34] Tobias Koch, Lukas Liebel, Friedrich Fraundorfer, and Marco Korner. Evaluation of cnn-based single-image depth estimation methods. In ECCVW, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.66, + 0.484, + 0.714 + ], + "angle": 0, + "content": "[35] Maksim Kolodiazhnyi, Anna Vorontsova, Matvey Skripkin, Danila Rukhovich, and Anton Konushin. Unidet3d: Multi-dataset indoor 3d object detection. arXiv preprint arXiv:2409.04234, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.717, + 0.484, + 0.758 + ], + "angle": 0, + "content": "[36] Buyu Li, Wanli Ouyang, Lu Sheng, Xingyu Zeng, and Xiaogang Wang. Gs3d: An efficient 3d object detection framework for autonomous driving. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.76, + 0.484, + 0.815 + ], + "angle": 0, + "content": "[37] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.817, + 0.484, + 0.871 + ], + "angle": 0, + "content": "[38] Xiaofan Li, Yifu Zhang, and Xiaoqing Ye. Drivingdiffusion: Layout-guided multi-view driving scenarios video generation with latent diffusion model. In European Conference on Computer Vision, 2024. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.484, + 0.901 + ], + "angle": 0, + "content": "[39] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Qiao Yu, and Jifeng Dai. Bevformer:" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.121 + ], + "angle": 0, + "content": "learning bird's-eye-view representation from lidar-camera via spatiotemporal transformers. IEEE TPAMI, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.123, + 0.905, + 0.163 + ], + "angle": 0, + "content": "[40] Zhuoling Li, Xiaogang Xu, SerNam Lim, and Hengshuang Zhao. Unimode: Unified monocular 3d object detection. In CVPR, 2024. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.166, + 0.905, + 0.221 + ], + "angle": 0, + "content": "[41] Tingting Liang, Hongwei Xie, Kaicheng Yu, Zhongyu Xia, Zhiwei Lin, Yongtao Wang, Tao Tang, Bing Wang, and Zhi Tang. Bevfusion: A simple and robust lidar-camera fusion framework. In NeurIPS, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.224, + 0.905, + 0.279 + ], + "angle": 0, + "content": "[42] Xuewu Lin, Tianwei Lin, Zixiang Pei, Lichao Huang, and Zhizhong Su. Sparse4d: Multi-view 3d object detection with sparse spatial-temporal fusion. arXiv preprint arXiv:2211.10581, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.282, + 0.905, + 0.322 + ], + "angle": 0, + "content": "[43] Luyang Liu, Hongyu Li, and Marco Gruteser. Edge assisted real-time object detection for mobile augmented reality. In MobiCom, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.325, + 0.905, + 0.393 + ], + "angle": 0, + "content": "[44] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. 
Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In ECCV, 2024. 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.396, + 0.905, + 0.438 + ], + "angle": 0, + "content": "[45] Zechen Liu, Zizhang Wu, and Roland Tóth. Smoke: Single-stage monocular 3d object detection via keypoint estimation. In CVPRW, 2020. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.44, + 0.905, + 0.481 + ], + "angle": 0, + "content": "[46] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.484, + 0.905, + 0.512 + ], + "angle": 0, + "content": "[47] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.514, + 0.905, + 0.556 + ], + "angle": 0, + "content": "[48] Xinzhu Ma, Wanli Ouyang, Andrea Simonelli, and Elisa Ricci. 3d object detection from images for autonomous driving: a survey. IEEE TPAMI, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.558, + 0.905, + 0.599 + ], + "angle": 0, + "content": "[49] Jiageng Mao, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. 3d object detection for autonomous driving: A comprehensive survey. IJCV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.601, + 0.905, + 0.657 + ], + "angle": 0, + "content": "[50] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.659, + 0.905, + 0.728 + ], + "angle": 0, + "content": "[51] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. 
Dinov2: Learning robust visual features without supervision. TMLR, 2024. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.73, + 0.905, + 0.771 + ], + "angle": 0, + "content": "[52] Youngmin Park, Vincent Lepetit, and Woontack Woo. Multiple 3d object tracking for augmented reality. In ISMAR, 2008. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.774, + 0.905, + 0.843 + ], + "angle": 0, + "content": "[53] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In NeurIPS, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[54] Luigi Piccinelli, Yung-Hsu Yang, Christos Sakaridis, Mattia Segu, Siyuan Li, Luc Van Gool, and Fisher Yu. Unidepth: Universal monocular metric depth estimation. In CVPR, 2024. 2, 3, 4, 5, 6, 14" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.51, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.147 + ], + "angle": 0, + "content": "[55] Zhangyang Qi, Zhixiong Zhang, Ye Fang, Jiaqi Wang, and Hengshuang Zhao. Gpt4scene: Understand 3d scenes from videos with vision-language models. arXiv preprint arXiv:2501.01428, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.482, + 0.218 + ], + "angle": 0, + "content": "[56] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.22, + 0.482, + 0.289 + ], + "angle": 0, + "content": "[57] Mike Roberts, Jason Ramapuram, Anurag Ranjan, Atulit Kumar, Miguel Angel Bautista, Nathan Paczan, Russ Webb, and Joshua M Susskind. Hypersim: A photorealistic synthetic dataset for holistic indoor scene understanding. In ICCV, 2021. 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.291, + 0.482, + 0.345 + ], + "angle": 0, + "content": "[58] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In WACV, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.348, + 0.482, + 0.389 + ], + "angle": 0, + "content": "[59] Daniel Scharstein and Richard Szeliski. A taxonomy and evaluation of dense two-frame stereo correspondence algorithms. IJCV, 2002. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.391, + 0.482, + 0.446 + ], + "angle": 0, + "content": "[60] Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens Beiwenger, Ping Luo, Andreas Geiger, and Hongyang Li. Drivelm: Driving with graph visual question answering. In ECCV, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.448, + 0.482, + 0.489 + ], + "angle": 0, + "content": "[61] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In CVPR, 2015. 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.491, + 0.482, + 0.559 + ], + "angle": 0, + "content": "[62] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In CVPR, 2020. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.562, + 0.482, + 0.63 + ], + "angle": 0, + "content": "[63] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.632, + 0.482, + 0.687 + ], + "angle": 0, + "content": "[64] Benjamin Ummenhofer, Huizhong Zhou, Jonas Uhrig, Nikolaus Mayer, Eddy Ilg, Alexey Dosovitskiy, and Thomas Brox. Demon: Depth and motion network for learning monocular stereo. In CVPR, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.689, + 0.482, + 0.743 + ], + "angle": 0, + "content": "[65] Johanna Wald, Armen Avetisyan, Nassir Navab, Federico Tombari, and Matthias Nießner. Rio: 3d object instance re-localization in changing indoor environments. In ICCV, 2019. 6, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.746, + 0.482, + 0.787 + ], + "angle": 0, + "content": "[66] Tai Wang, Xinge Zhu, Jiangmiao Pang, and Dahua Lin. Fcos3d: Fully convolutional one-stage monocular 3d object detection. In ICCV, 2021. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.789, + 0.482, + 0.856 + ], + "angle": 0, + "content": "[67] Tai Wang, Xiaohan Mao, Chenming Zhu, Runsen Xu, Ruiyuan Lyu, Peisen Li, Xiao Chen, Wenwei Zhang, Kai Chen, Tianfan Xue, et al. Embodiedscan: A holistic multimodal 3d perception suite towards embodied ai. In CVPR, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.859, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[68] Zhenyu Wang, Ya-Li Li, Xi Chen, Hengshuang Zhao, and Shengjin Wang. Uni3detr: Unified 3d detection transformer. In NeurIPS, 2023. 
2, 7" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.147 + ], + "angle": 0, + "content": "[69] Zhenyu Wang, Yali Li, Taichi Liu, Hengshuang Zhao, and Shengjin Wang. Ov-uni3detr: Towards unified open-vocabulary 3d object detection via cycle-modality propagation. In ECCV, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.149, + 0.905, + 0.219 + ], + "angle": 0, + "content": "[70] Benjamin Wilson, William Qi, Tanmay Agarwal, John Lambert, Jagjeet Singh, Siddhesh Khandelwal, Bowen Pan, Ratnesh Kumar, Andrew Hartnett, Jhony Kaesemodel Pontes, et al. Argoverse 2: Next generation datasets for self-driving perception and forecasting. In NeurIPS Datasets, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.221, + 0.905, + 0.275 + ], + "angle": 0, + "content": "[71] Guorun Yang, Xiao Song, Chaoqin Huang, Zhidong Deng, Jianping Shi, and Bolei Zhou. Drivingstereo: A large-scale dataset for stereo matching in autonomous driving scenarios. In CVPR, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.277, + 0.905, + 0.318 + ], + "angle": 0, + "content": "[72] Jie Yang, Bingliang Li, Ailing Zeng, Lei Zhang, and Ruimao Zhang. Open-world human-object interaction detection via multi-modal prompts. In CVPR, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.32, + 0.905, + 0.36 + ], + "angle": 0, + "content": "[73] Xiuyu Yang, Yunze Man, Junkun Chen, and Yu-Xiong Wang. Scenecraft: Layout-guided 3d scene generation. In NeurIPS, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.362, + 0.905, + 0.417 + ], + "angle": 0, + "content": "[74] Jin Yao, Hao Gu, Xuweiyi Chen, Jiayun Wang, and Zezhou Cheng. Open vocabulary monocular 3d object detection. arXiv preprint arXiv:2411.16833, 2024. 
2, 3, 5, 6, 7, 13, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.419, + 0.905, + 0.475 + ], + "angle": 0, + "content": "[75] Kaixin Yao, Longwen Zhang, Xinhao Yan, Yan Zeng, Qixuan Zhang, Lan Xu, Wei Yang, Jiayuan Gu, and Jingyi Yu. Cast: Component-aligned 3d scene reconstruction from anrgb image. arXiv preprint arXiv:2502.12894, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.476, + 0.905, + 0.531 + ], + "angle": 0, + "content": "[76] Wei Yin, Chi Zhang, Hao Chen, Zhipeng Cai, Gang Yu, Kaixuan Wang, Xiaozhi Chen, and Chunhua Shen. Metric3d: Towards zero-shot metric 3d prediction from a single image. In ICCV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.533, + 0.905, + 0.574 + ], + "angle": 0, + "content": "[77] Amir R Zamir, Alexander Sax, William Shen, Leonidas J Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In CVPR, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.576, + 0.905, + 0.63 + ], + "angle": 0, + "content": "[78] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.632, + 0.905, + 0.686 + ], + "angle": 0, + "content": "[79] Renrui Zhang, Zhengkai Jiang, Ziyu Guo, Shilin Yan, Junting Pan, Hao Dong, Peng Gao, and Hongsheng Li. Personalize segment anything model with one shot. *ICLR*, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.689, + 0.905, + 0.744 + ], + "angle": 0, + "content": "[80] Renrui Zhang, Han Qiu, Tai Wang, Ziyu Guo, Ziteng Cui, Yu Qiao, Hongsheng Li, and Peng Gao. Monodetr: Depth-guided transformer for monocular 3d object detection. In ICCV, 2023. 
3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.746, + 0.905, + 0.8 + ], + "angle": 0, + "content": "[81] Renrui Zhang, Jiaming Han, Chris Liu, Aojun Zhou, Pan Lu, Yu Qiao, Hongsheng Li, and Peng Gao. Llama-adapter: Efficient fine-tuning of large language models with zero-initialized attention. In ICLR, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.802, + 0.905, + 0.871 + ], + "angle": 0, + "content": "[82] Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Ziyu Guo, Shicheng Li, Yichi Zhang, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, et al. Mavis: Mathematical visual instruction tuning with an automatic data engine. arXiv preprint arXiv:2407.08739, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.873, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[83] Haoyi Zhu, Honghui Yang, Xiaoyang Wu, Di Huang, Sha Zhang, Xianglong He, Hengshuang Zhao, Chunhua Shen, Yu" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.092, + 0.482, + 0.134 + ], + "angle": 0, + "content": "Qiao, Tong He, et al. Ponderv2: Pave the way for 3d foundation model with a universal pre-training paradigm. arXiv preprint arXiv:2310.08586, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.136, + 0.483, + 0.19 + ], + "angle": 0, + "content": "[84] Menglong Zhu, Konstantinos G Derpanis, Yinfei Yang, Samarth Brahmbhatt, Mabel Zhang, Cody Phillips, Matthieu Lecce, and Kostas Daniilidis. Single image 3d object detection and pose estimation for grasping. In ICRA, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.192, + 0.482, + 0.247 + ], + "angle": 0, + "content": "[85] Ziyu Zhu, Zhuofan Zhang, Xiaojian Ma, Xuesong Niu, Yixin Chen, Baoxiong Jia, Zhidong Deng, Siyuan Huang, and Qing Li. 
Unifying 3d vision-language understanding via promptable queries. In ECCV, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.249, + 0.482, + 0.303 + ], + "angle": 0, + "content": "[86] Yiming Zuo, Karhan Kayan, Maggie Wang, Kevin Jeon, Jia Deng, and Thomas L Griffiths. Towards foundation models for 3d vision: How close are we? arXiv preprint arXiv:2410.10799, 2024. 2" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.483, + 0.303 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.339, + 0.087, + 0.66, + 0.14 + ], + "angle": 0, + "content": "Detect Anything 3D in the Wild Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.156, + 0.168, + 0.171 + ], + "angle": 0, + "content": "6. DA3D" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.184, + 0.484, + 0.32 + ], + "angle": 0, + "content": "DA3D is a unified 3D detection dataset, consists of 16 diverse datasets. It builds upon six datasets in Omni3D—Hypersim [57], ARKitScenes [4], Objectron [2], SUNRGBD [61], KITTI [24], and nuScenes [9]—while partially incorporating an additional 10 datasets to further enhance the scale, diversity, and generalization capabilities of 3D detection models. As shown in Figure 5, DA3D comprises 0.4 million frames (\\(2.5 \\times\\) the scale of Omni3D), spanning 20 distinct camera configurations." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.323, + 0.484, + 0.414 + ], + "angle": 0, + "content": "The dataset is standardized with the similar structure to Omni3D [8], including monocular RGB images, camera intrinsics, 3D bounding boxes, and depth maps. DA3D is designed to test 3D detection models across a wide variety of environments, camera configurations, and object categories, offering a more comprehensive evaluation setting." 
+ }, + { + "type": "title", + "bbox": [ + 0.09, + 0.431, + 0.288, + 0.447 + ], + "angle": 0, + "content": "6.1. Dataset Composition" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.455, + 0.478, + 0.47 + ], + "angle": 0, + "content": "We categorize the datasets in DA3D based on two aspects:" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.472, + 0.484, + 0.563 + ], + "angle": 0, + "content": "Indoor vs. Outdoor. As shown in Figure 6 (left), DA3D expands both indoor and outdoor datasets compared to Omni3D. Additionally, the ratio of indoor to outdoor data in DA3D is more balanced than in Omni3D, ensuring a more representative distribution for models trained across diverse environments." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.566, + 0.483, + 0.597 + ], + "angle": 0, + "content": "Supervision Types. We also analyze DA3D in terms of the distribution of supervision types (See Figure 6 (right)):" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.599, + 0.388, + 0.613 + ], + "angle": 0, + "content": "- \\(35\\%\\) data provides only depth supervision." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.614, + 0.458, + 0.628 + ], + "angle": 0, + "content": "- \\(23\\%\\) data provide only 3D bounding box annotations." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.629, + 0.482, + 0.656 + ], + "angle": 0, + "content": "- \\(42\\%\\) data contains both depth maps and 3D bounding boxes." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.659, + 0.405, + 0.673 + ], + "angle": 0, + "content": "- Intrinsic parameters are available for all data." + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.599, + 0.482, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.69, + 0.237, + 0.707 + ], + "angle": 0, + "content": "6.2. Dataset Splits." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.715, + 0.483, + 0.746 + ], + "angle": 0, + "content": "For training and evaluation, we follow the dataset splitting strategy used in prior works [8]. Specifically:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.748, + 0.483, + 0.777 + ], + "angle": 0, + "content": "- We construct the training set by merging training subsets from the original datasets." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.778, + 0.483, + 0.807 + ], + "angle": 0, + "content": "- We form the validation set by sampling from the original training data, ensuring balanced representation." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.808, + 0.483, + 0.852 + ], + "angle": 0, + "content": "- We use the original validation sets of each dataset as the test set, allowing for direct comparison with previous benchmarks." + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.748, + 0.483, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.484, + 0.902 + ], + "angle": 0, + "content": "This setup ensures fair evaluation and maintains consistency with existing benchmarks while assessing both indomain and zero-shot generalization capabilities." + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.16, + 0.9, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.565, + 0.38, + 0.855, + 0.394 + ], + "angle": 0, + "content": "Figure 5. The composition of the DA3D dataset." + }, + { + "type": "image", + "bbox": [ + 0.525, + 0.431, + 0.892, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.578, + 0.907, + 0.62 + ], + "angle": 0, + "content": "Figure 6. The data distribution of the DA3D dataset. (left): the statistics of indoor and outdoor data. (right): the statistics of data with different supervision categories." 
+ }, + { + "type": "title", + "bbox": [ + 0.514, + 0.647, + 0.685, + 0.664 + ], + "angle": 0, + "content": "6.3. Evaluation Setup" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.67, + 0.906, + 0.715 + ], + "angle": 0, + "content": "DA3D is designed to evaluate zero-shot generalization in both novel object categories and novel camera configurations. We define two evaluation settings:" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.716, + 0.906, + 0.761 + ], + "angle": 0, + "content": "Zero-Shot Categories. Following prior work [74], we select partial categories from KITTI, SUNRGBD, and ARKitScenes as unseen classes for zero-shot testing." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.763, + 0.657, + 0.775 + ], + "angle": 0, + "content": "Zero-Shot Datasets." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.778, + 0.905, + 0.808 + ], + "angle": 0, + "content": "- We use Cityscapes3D, Waymo, and 3RScan as unseen datasets with novel camera configurations." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.808, + 0.905, + 0.852 + ], + "angle": 0, + "content": "- Cityscapes3D & Waymo introduce new intrinsics and image styles, challenging models to generalize across different camera setups." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.853, + 0.905, + 0.899 + ], + "angle": 0, + "content": "- 3RScan not only introduces novel camera setups, but also contains unseen object categories, making it useful for testing both category and camera generalization." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.778, + 0.905, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.121, + 0.482, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.301, + 0.483, + 0.33 + ], + "angle": 0, + "content": "Figure 7. 
Detailed implementation of camera and depth module from UniDepth." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.354, + 0.232, + 0.37 + ], + "angle": 0, + "content": "7. Model Details" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.379, + 0.395, + 0.396 + ], + "angle": 0, + "content": "7.1. Camera and Depth Module Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.401, + 0.483, + 0.445 + ], + "angle": 0, + "content": "This section introduces how the camera module and depth module work, predicting intrinsic and camera-aware depth, also related feature." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.446, + 0.484, + 0.522 + ], + "angle": 0, + "content": "As show in Figure 7, the fused feature \\(\\hat{\\mathbf{F}}_{\\mathrm{fused}}\\) are input into the camera module, which uses a cross-attention mechanism and a to obtain the camera intrinsic parameters. These intrinsic parameters are then used to generate camera rays. The rays are defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.532, + 0.368, + 0.582 + ], + "angle": 0, + "content": "\\[\n(r _ {1}, r _ {2}, r _ {3}) = \\mathbf {K} ^ {- 1} \\left[ \\begin{array}{l} u \\\\ v \\\\ 1 \\end{array} \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.586, + 0.484, + 0.634 + ], + "angle": 0, + "content": "where \\(\\mathbf{K}\\) is the calibration matrix, \\(u\\) and \\(v\\) are the pixel coordinates, and 1 is a vector of ones. 
In this context, the homogeneous camera rays \\((r_x,r_y)\\) are derived from:" + }, + { + "type": "equation", + "bbox": [ + 0.252, + 0.642, + 0.322, + 0.676 + ], + "angle": 0, + "content": "\\[\n\\left( \\begin{array}{c} r _ {1} \\\\ \\hline r _ {3} \\end{array} , \\frac {r _ {2}}{r _ {3}}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.681, + 0.483, + 0.741 + ], + "angle": 0, + "content": "This dense representation of the camera rays undergoes Laplace Spherical Harmonic Encoding (SHE) [54] to produce the embeddings \\(\\mathbf{C}\\). These embeddings are then passed to the depth module using the cross-attention mechanism." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.741, + 0.483, + 0.772 + ], + "angle": 0, + "content": "The depth feature conditioned on the camera embeddings, is computed as:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.786, + 0.405, + 0.803 + ], + "angle": 0, + "content": "\\[\n\\mathbf {D} \\mid \\mathbf {C} = \\operatorname {M L P} (\\operatorname {C r o s s A t t n} (\\mathbf {D}, \\mathbf {C}))\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.809, + 0.483, + 0.84 + ], + "angle": 0, + "content": "Subsequently, the depth feature is processed through an upsampling head to predict the final depth map." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.849, + 0.288, + 0.864 + ], + "angle": 0, + "content": "7.2.3D Box Head Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.902 + ], + "angle": 0, + "content": "This section introduces the details of the 3D box head. After the query \\(\\mathbf{Q}\\) passes through the Geometric Transformer" + }, + { + "type": "image", + "bbox": [ + 0.548, + 0.094, + 0.877, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.616, + 0.271, + 0.802, + 0.286 + ], + "angle": 0, + "content": "Figure 8. 3D Box head details." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.313, + 0.905, + 0.373 + ], + "angle": 0, + "content": "and Two-Way Transformer, the model outputs \\(\\mathbf{O}\\). \\(\\mathbf{O}\\) contains outputs corresponding to both 3D-related hidden states \\(\\mathbf{O}_{3D}\\) and prompt hidden states \\(\\mathbf{O}_p\\). We extract the 3D-related output \\(\\mathbf{O}_{3D}\\) for further processing." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.373, + 0.905, + 0.403 + ], + "angle": 0, + "content": "Subsequently, \\(\\mathbf{O}_{3\\mathrm{D}}\\) is passed through a series of prediction heads as shown in Figure 8." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.404, + 0.906, + 0.493 + ], + "angle": 0, + "content": "We then transform these predictions into the final 3D bounding box parameters and obtain the 3D bounding box \\((x,y,z,w,h,l,R,S)\\) for each detected object, where \\((x,y,z)\\) denotes the 3D center, \\((w,h,l)\\) represent the dimensions, and \\((R,S)\\) describe the rotation and predicted 3D IoU score." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.505, + 0.644, + 0.52 + ], + "angle": 0, + "content": "7.3. Loss Details" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.527, + 0.905, + 0.558 + ], + "angle": 0, + "content": "Depth Loss. The depth module is supervised using the Scale-Invariant Logarithmic (SILog) loss, defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.529, + 0.569, + 0.905, + 0.617 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {d e p t h}} = \\sqrt {\\frac {1}{N} \\sum_ {i = 1} ^ {N} \\Delta d _ {i} ^ {2} - 0 . 1 5 \\cdot \\left(\\frac {1}{N} \\sum_ {i = 1} ^ {N} \\Delta d _ {i}\\right) ^ {2}} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.63, + 0.905, + 0.662 + ], + "angle": 0, + "content": "where \\(\\Delta d_{i} = \\log (d_{i}^{\\mathrm{pred}}) - \\log (d_{i}^{\\mathrm{gt}})\\), and \\(N\\) is the number of valid depth pixels." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.662, + 0.905, + 0.707 + ], + "angle": 0, + "content": "Camera Intrinsic Loss. The camera error is computed with the dense camera rays. For an image with height \\( H \\) and width \\( W \\), the intrinsic loss is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.524, + 0.732, + 0.905, + 0.781 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {c a m}} = \\sqrt {\\frac {1}{H W} \\sum_ {i = 1} ^ {H W} \\Delta r _ {i} ^ {2} - 1 \\cdot \\left(\\frac {1}{H W} \\sum_ {i = 1} ^ {H W} \\Delta r _ {i}\\right) ^ {2}} \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.793, + 0.681, + 0.81 + ], + "angle": 0, + "content": "where \\(\\Delta r_{i} = r_{i}^{\\mathrm{pred}} - r_{i}^{\\mathrm{gt}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.811, + 0.905, + 0.84 + ], + "angle": 0, + "content": "Detection Loss. The detection loss consists of three components:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.841, + 0.905, + 0.871 + ], + "angle": 0, + "content": "- Smooth L1 loss for box regression, covering the prediction of center, depth, and dimensions." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.871, + 0.905, + 0.9 + ], + "angle": 0, + "content": "- Chamfer loss for rotation matrix prediction, ensuring accurate orientation estimation." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.841, + 0.905, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.089, + 0.452, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.233, + 0.484, + 0.318 + ], + "angle": 0, + "content": "Figure 9. An example on 3RScan. 
The left image shows the original 3RScan annotations, while the right image presents the detection results from Grounding DINO after feeding in all the 3RScan labels. Severe naming ambiguities (e.g., \"trash can\" vs. \"rubbish bin\") and missing annotations lead to a substantial decrease in the detector's performance." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.341, + 0.483, + 0.386 + ], + "angle": 0, + "content": "- Mean squared error (MSE) loss for 3D IoU score prediction, which optimizes the confidence estimates of detected objects." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.387, + 0.441, + 0.402 + ], + "angle": 0, + "content": "Combining these terms, the total detection loss is:" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.412, + 0.482, + 0.428 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {d e t}} = \\mathcal {L} _ {\\mathrm {b o x}} + \\mathcal {L} _ {\\mathrm {r o t}} + \\mathcal {L} _ {\\mathrm {i o u}}, \\tag {12}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.439, + 0.299, + 0.456 + ], + "angle": 0, + "content": "8. Target-aware Metrics" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.464, + 0.483, + 0.569 + ], + "angle": 0, + "content": "In our work, we evaluate both traditional metrics and the target-aware metrics proposed by OVMono3D [74]. Under the target-aware paradigm, rather than prompting the model with all possible classes from an entire dataset, we only prompt it with the classes present in the current image during inference. This is designed to address two key challenges encountered:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.57, + 0.483, + 0.614 + ], + "angle": 0, + "content": "- Missing annotations: Comprehensive 3D annotation is often impractical or prohibitively expensive, leading to incomplete ground-truth annotations." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.615, + 0.483, + 0.66 + ], + "angle": 0, + "content": "- Naming ambiguity: Datasets may label the same objects with inconsistent category names or annotation policies, creating confusion when merging datasets." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.57, + 0.483, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.661, + 0.483, + 0.796 + ], + "angle": 0, + "content": "As illustrated in Figure 9, these issues are especially pronounced in the 3RScan [65] dataset. The left side shows the official 3RScan annotations, while the right side shows detections from Grounding DINO, which are largely misaligned with the dataset's labeling conventions. Consequently, traditional evaluation metrics may yield misleading or inconsistent results, whereas target-aware metrics help mitigate these mismatches by restricting the evaluated classes to those actually present in the scene." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.809, + 0.292, + 0.827 + ], + "angle": 0, + "content": "9. More Ablation Study" + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.834, + 0.36, + 0.851 + ], + "angle": 0, + "content": "9.1. Various Prompts Performance" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.484, + 0.901 + ], + "angle": 0, + "content": "In this section, we evaluate different types of prompts, including box prompts, point prompts, and text prompts, both with and without intrinsic prompts. The results on Omni3D" + }, + { + "type": "table_caption", + "bbox": [ + 0.595, + 0.09, + 0.825, + 0.104 + ], + "angle": 0, + "content": "Table 4. Various Prompt Performance." + }, + { + "type": "table", + "bbox": [ + 0.552, + 0.116, + 0.871, + 0.174 + ], + "angle": 0, + "content": "
Prompt TypeBoxPointText
w/ Intrinsic Prompt34.3825.1922.31
w/o Intrinsic Prompt32.1624.021.02
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.188, + 0.907, + 0.258 + ], + "angle": 0, + "content": "Table 5. Ablation on different backbones. The table reports \\(\\mathrm{AP}_{3\\mathrm{D}}\\) scores. We verify the effectiveness of SAM and DINO along two dimensions: (1) whether or not we use the pretrained SAM parameters, and (2) whether adopt the pretrained DINO backbone or ConvNeXt for the depth module." + }, + { + "type": "table", + "bbox": [ + 0.582, + 0.269, + 0.836, + 0.332 + ], + "angle": 0, + "content": "
Backbonew/ SAMw/o SAM
DINO25.8019.12
ConvNeXt23.1118.27
" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.358, + 0.907, + 0.449 + ], + "angle": 0, + "content": "are presented in Table 4. Each prompt type demonstrates its effectiveness in guiding 3D detection. Besides, on the zero-shot datasets, we observe that omitting intrinsic prompts leads to a significant performance drop (even approaching zero), which further highlights the critical role of intrinsic prompts for reliable depth calibration in unseen scenarios." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.459, + 0.803, + 0.475 + ], + "angle": 0, + "content": "9.2. Ablation on Different Backbones" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.482, + 0.907, + 0.633 + ], + "angle": 0, + "content": "In this section, we investigate our choice of backbone by comparing the use of SAM and DINO backbones. For DINO, we replace it with ConvNeXt and adopt the same pretraining method proposed by UniDepth. For SAM, we examine its effect by removing the SAM-pretrained weights and training from scratch. As shown in Table 5, SAM's pretrained parameters prove crucial for boosting performance. Meanwhile, compared to ConvNeXt, DINO offers richer geometric representations, resulting in stronger 3D detection performance." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.644, + 0.757, + 0.66 + ], + "angle": 0, + "content": "9.3. Ablation on DA3D Dataset" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.667, + 0.906, + 0.727 + ], + "angle": 0, + "content": "We ablate the impact of the DA3D dataset in Tab. 6. The additional data in DA3D primarily improves generalization to novel cameras, as Omni3D contains only two distinctive intrinsics for outdoor scenes." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.739, + 0.906, + 0.795 + ], + "angle": 0, + "content": "Table 6. Ablation on training datasets. Unless specified, all models are trained on the Omni3D dataset. 
For the in-domain setting, prompts are provided by Cube R-CNN, while prompts for novel classes and novel datasets are generated by Grounding DINO." + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.806, + 0.905, + 0.882 + ], + "angle": 0, + "content": "
MethodIn-domain\nAPommi3d\n3DNovel ClassNovel Camera
APkit\n3DAPsun\n3DAPcity\n3DAP3rs\n3D
Cube R-CNN23.26--8.22 / --
OVMono3D22.984.71 / 4.714.07 / 16.785.88 / 10.980.37 / 8.48
DetAny3D24.3323.75 / 23.757.63 / 20.878.31 / 11.680.64 / 9.56
DetAny3DDA3D24.9225.73 / 25.737.63 / 21.0711.05 / 15.710.65 / 9.58
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.091, + 0.345, + 0.108 + ], + "angle": 0, + "content": "9.4. Ablation on Inference Speed" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.113, + 0.484, + 0.219 + ], + "angle": 0, + "content": "We compare the inference speed of DetAny3D with prior methods in Table 7. DetAny3D runs at 1.5 FPS on a single KITTI image, which is slower than Cube R-CNN (33.3 FPS) and OVMono3D (7.1 FPS). This is a trade-off for stronger generalization across novel categories and cameras, as DetAny3D is designed as a foundation model rather than for real-time deployment." + }, + { + "type": "table_caption", + "bbox": [ + 0.145, + 0.232, + 0.429, + 0.246 + ], + "angle": 0, + "content": "Table 7. Inference speed comparison on KITTI." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.257, + 0.476, + 0.306 + ], + "angle": 0, + "content": "
MethodCube R-CNNOVMono3DDetAny3D
FPS ↑33.37.11.5
" + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.329, + 0.461, + 0.346 + ], + "angle": 0, + "content": "9.5. Per-category Performance on Novel Classes" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.351, + 0.484, + 0.427 + ], + "angle": 0, + "content": "As shown in Table 8, we provide a detailed comparison of per-category \\(\\mathrm{AP}_{3\\mathrm{D}}\\) on novel classes from the KITTI, SUNRGBD, and ARKitScenes datasets between our DetAny3D and the baseline OVMono3D. DetAny3D shows consistent improvements across most categories." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.44, + 0.223, + 0.456 + ], + "angle": 0, + "content": "10. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.466, + 0.484, + 0.586 + ], + "angle": 0, + "content": "Text Prompt Process. Our method leverages open-vocabulary 2D detectors such as Grounding DINO to convert text prompts into 2D box prompts. While effective, this strategy may cause semantic loss, as textual nuances are not directly injected into the 3D detection pipeline. Moreover, 2D detectors are known to perform poorly under heavy occlusion or partial visibility, introducing a domain gap when transferring their outputs to 3D tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.587, + 0.484, + 0.676 + ], + "angle": 0, + "content": "Inference Efficiency. Although DetAny3D achieves strong generalization across novel categories and camera settings, its inference speed (1.5 FPS) is significantly slower than existing lightweight 3D detectors. This limits its applicability in latency-sensitive scenarios such as real-time robotics or autonomous driving." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.677, + 0.484, + 0.783 + ], + "angle": 0, + "content": "Lack of Temporal Modeling. Our current design operates on single-frame inputs and does not utilize temporal information from video sequences. 
Incorporating motion cues and enforcing temporal consistency could potentially improve detection accuracy and enable better integration into downstream video-based tasks, such as video knowledge distillation and temporal grounding." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.796, + 0.303, + 0.814 + ], + "angle": 0, + "content": "11. Licenses and Privacy" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.821, + 0.482, + 0.852 + ], + "angle": 0, + "content": "All data used in this work are obtained from publicly available datasets and are subject to their respective licenses." + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.257, + 0.905, + 0.285 + ], + "angle": 0, + "content": "Table 8. Per-category target-aware \\(\\mathrm{AP}_{3\\mathrm{D}}\\) comparison on novel classes between DetAny3D and OVMono3D." + }, + { + "type": "table", + "bbox": [ + 0.548, + 0.296, + 0.875, + 0.732 + ], + "angle": 0, + "content": "
CategoryOVMono3DDetAny3D
Board4.836.02
Printer16.2360.22
Painting2.805.11
Microwave30.3157.21
Tray10.116.70
Podium48.3773.65
Cart47.3133.46
Tram4.7127.90
Easy Categories20.5833.79
Monitor9.4415.95
Bag15.6117.69
Dresser29.0841.75
Keyboard9.139.52
Drawers43.0440.80
Computer7.4412.37
Kitchen Pan9.988.70
Potted Plant6.6626.34
Tissues12.4512.95
Rack10.219.04
Toys5.2416.14
Phone3.894.42
Soundsystem13.226.21
Fireplace13.1630.75
Hard Categories13.4718.05
All Categories16.0523.77
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "16" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_origin.pdf b/data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..616c49c10a823d5023b029c9afeb01122404e5e9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8b704a482d79f94bb5fa336d85dea6f0f2f7c6d5a7ec148dbcc86d559209f4b +size 9994707 diff --git a/data/2025/2504_07xxx/2504.07958/full.md b/data/2025/2504_07xxx/2504.07958/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7333e1afbf5064fe214b1b0e8c0d902e0da4460c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/full.md @@ -0,0 +1,538 @@ +# Detect Anything 3D in the Wild + +Hanxue Zhang $^{1,2*}$ , Haoran Jiang $^{1,3*}$ , Qingsong Yao $^{4*}$ , Yanan Sun $^{1}$ , Renrui Zhang $^{5}$ , Hao Zhao $^{6}$ , Hongyang Li $^{1}$ , Hongzi Zhu $^{2}$ , Zetong Yang $^{1,7}$ + +1 OpenDriveLab at Shanghai AI Laboratory 2 Shanghai Jiao Tong University 3 Fudan University 4 Stanford University 5 CUHK MMLab 6 Tsinghua University 7 GAC R&D Center + +https://github.com/OpenDriveLab/DetAny3D + +![](images/299257f0337a99f363ce4d49708330f368be601ba092797380509dd2b32250d9.jpg) +Figure 1. Introducing DetAny3D, a promptable 3D detection foundation model capable of detecting any 3D object with arbitrary monocular images in diverse scenes. Our framework enables multi-prompt interaction (e.g., box, point, and text) to deliver open-world 3D detection results ( $w \times h \times l$ in centimeter) for novel objects across various domains. 
It achieves significant zero-shot generalization, outperforming SOTA by up to 21.02 and 5.68 AP3D on novel categories and novel datasets with new camera configurations. + +# Abstract + +Despite the success of deep learning in close-set 3D object detection, existing approaches struggle with zero-shot generalization to novel objects and camera configurations. We introduce DetAny3D, a promptable 3D detection foundation model capable of detecting any novel object under arbitrary camera configurations using only monocular inputs. Training a foundation model for 3D detection is fun + +damentally constrained by the limited availability of annotated 3D data, which motivates DetAny3D to leverage the rich prior knowledge embedded in extensively pre-trained 2D foundation models to compensate for this scarcity. To effectively transfer 2D knowledge to 3D, DetAny3D incorporates two core modules: the 2D Aggregator, which aligns features from different 2D foundation models, and the 3D Interpreter with Zero-Embedding Mapping, which stabilizes early training in 2D-to-3D knowledge transfer. Experimental results validate the strong generalization of our DetAny3D, which not only achieves state-of-the-art performance on unseen categories and novel camera configura + +tions, but also surpasses most competitors on in-domain data. DetAny3D sheds light on the potential of the 3D foundation model for diverse applications in real-world scenarios, e.g., rare object detection in autonomous driving, and demonstrates promise for further exploration of 3D-centric tasks in open-world settings. More visualization results can be found at our code repository. + +# 1. Introduction + +3D object detection is a fundamental technology for autonomous systems [12, 14, 15, 36, 48, 49], robotics [6, 67, 84], and augmented reality [43, 52]. 
3D perception not only enables machines to perceive and interact with the physical world, but also serves as a foundational input for more advanced tasks, such as behavior decision [3, 11, 20, 31], world modeling [22, 23, 38] and 3D scene reconstruction [50, 73, 75]. For practical deployment, a generalizable 3D detector ideally should detect arbitrary objects from easily accessible inputs, such as monocular images, without relying on specific sensor parameters. Such a model would be highly adaptable and reliable for various downstream tasks in diverse and unpredictable environments [15, 36, 43, 84]. Also, accurate detection results provided by such a detector (e.g., generating 3D bounding boxes for even images from the internet) make it a versatile tool, paving the way for scalable 3D systems that leverage Internet-scale data and advance toward open-world scenarios [22, 23, 38, 50, 73]. + +Previous research, exemplified by Omni3D [8], has attempted to improve the generalization of the 3D detection system through multi-dataset training [8, 35, 40, 68]. However, despite utilizing large datasets to train a unified detector [8, 40], these approaches provide limited generalization to novel camera configurations and cannot detect unseen object categories beyond predefined label spaces. Therefore, developing a 3D detection foundation model with strong zero-shot generalizability, which is capable of detecting any unseen object under arbitrary camera configurations, remains a crucial and unsolved problem. + +While recent advances in 2D foundation models [33, 44, 51, 56] demonstrate remarkable zero-shot capabilities. Segment Anything Model (SAM) [33] features a promptable inference mechanism, supporting user-friendly prompts like points and boxes to segment user-specified objects. Their impressive generalization ability stems from training on billions of annotated images. 
However, in 3D object detection, the available labeled data is limited to only millions of samples—typically 3-4 orders of magnitude smaller than in 2D images. Such severe data scarcity [74, 86] poses a fundamental challenge, making it nearly infeasible to train a 3D foundation model from scratch.
+
+In this work, we present DetAny3D, a promptable 3D detection foundation model designed for generalizable 3D
+
+object detection using only monocular images (see Figure 1). Given the inherent scarcity of 3D annotated data, we achieve strong generalization from two critical perspectives: model architecture and data utilization. The central insight of our approach is to leverage the extensive prior knowledge encoded within two broadly pre-trained 2D foundation models—SAM [33] and DINO [10, 51]—thus unlocking effective zero-shot 3D detection capabilities with minimal available 3D data.
+
+Specifically, we adopt SAM as our promptable backbone, capitalizing on its versatile and robust object understanding capability derived from large-scale 2D data. Concurrently, we utilize DINO [51] depth-pretrained by UniDepth [54], to offer redundant 3D geometric priors [7, 76], which plays a pivotal role for accurate 3D detection in a monocular setting. To integrate the complementary features from SAM and DINO more effectively, we propose the 2D Aggregator, an attention-based mechanism that aligns these features and dynamically optimizes their contributions via learnable gating. 2D Aggregator fully exploits the strengths of each foundation model.
+
+To further address the challenge of effectively transferring knowledge from 2D to 3D, we introduce the 3D Interpreter. Central to the 3D Interpreter is the Zero-Embedding Mapping (ZEM) mechanism, which ensures stable 2D-to-3D mapping by reducing early-stage interference and preserving pretrained 2D priors. 
By stabilizing the training process across diverse datasets with varying camera parameters, scene complexities, and depth distributions, the ZEM mechanism enables progressive zero-shot 3D grounding capabilities, significantly enhancing model generalization. + +To leverage as much 3D-related data as possible, we aggregate a diverse range of datasets, including 16 datasets spanning depth with intrinsic data and 3D detection data, referred as DA3D. Experimental results, using prompts aligned with the baselines, demonstrate three key advantages: (1) Generalization to novel classes: achieves $21.0\%$ , $4.3\%$ , $11.3\%$ higher zero-shot $\mathrm{AP}_{3\mathrm{D}}$ than baselines on novel categories on KITTI, SUNRGBD, and ARKitScenes. (2) Generalization to novel cameras: improves cross-dataset performance by $4.7\%$ , $5.7\%$ and $1.1\%$ $\mathrm{AP}_{3\mathrm{D}}$ compared to baseline methods on zero-shot datasets Cityscapes3D, Waymo and 3RScan. (3) Performance on in-domain data: surpasses baseline by $1.6\%$ $\mathrm{AP}_{3\mathrm{D}}$ on Omni3D. Core contributions are summarized in following: + +- We develop DetAny3D, a promptable 3D detection foundation model capable of detecting any 3D object in real-world scenarios with arbitrary monocular inputs. +- DetAny3D introduces 2D Aggregator to effectively fuse the features from two 2D foundation models SAM and depth-pretrained DINO, which provide pivot shape and 3D geometric priors for various objects, respectively. +- In 2D-to-3D knowledge transfer, DetAny3D involves + +Zero-Embedding Mapping in 3D Interpreter to achieve stable 2D-to-3D mapping, enabling the model to train robustly across datasets with diverse camera parameters, varying scenes, and different depth distributions. 
+ +- The experimental results demonstrate significant advantages of DetAny3D, particularly in accurately detecting unseen 3D objects with arbitrary camera parameters in the zero-shot setting, showcasing its potential across a wide range of real-world applications. + +# 2. Related works + +# 2.1. 3D Object Detection + +Existing 3D object detection systems have predominantly focused on single-dataset optimization, achieving strong performance on benchmark datasets like KITTI [24] and nuScenes [9] through task-specific architectures [14, 18, 39, 41, 42, 45, 66, 80]. While effective in constrained scenarios, these approaches suffer from significant domain gaps when deployed in new contexts, primarily due to their reliance on limited sensor-specific data and closed-set assumptions. Recent works, exemplified by Omni3D [8], have demonstrated the potential of multi-dataset training. Models like Cube R-CNN [8] and UniMODE [40] train a universal monocular 3D detector across multiple datasets, achieving some level of robustness to camera parameters, but are still restricted to predefined classes. V-MIND [32] further addresses the data scarcity challenge by generating pseudo 3D training data from large-scale 2D annotations. Towards more general detection, OV-Uni3DETR [69] pioneers openset detection that is able to detect with multimodal inputs, but it is trained separately for indoor and outdoor domains, thereby limiting its overall generalization. More recently, OVMono3D [74] leverages Grounding DINO's [44] 2D results with a 3D head on unified datasets. However, it does not fully exploit the priors contained in 2D foundation models, leading to performance constraints tied to the limited 3D data. In contrast, our approach fully capitalizes on the knowledge distilled in 2D foundation models while leveraging abundant 3D-related data, thereby enabling the detection of any 3D object from arbitrary monocular inputs. + +# 2.2. 
Vision Foundation Models + +Foundation models have demonstrated significant potential across various domains. For example, language foundation models such as GPT-4 [1] and DeepSeek [5, 26], trained on massive internet-scale corpora, have achieved impressive capabilities in natural language processing across diverse fields [1, 5, 60, 63, 81, 82]. Similarly, foundation models in the vision domain have made remarkable strides [29, 33, 37, 44, 51, 56, 79]. DINOv2 [51], trained on a vast range of curated data from diverse sources, is capable of producing general-purpose visual features that work + +seamlessly across different image distributions and tasks. SAM [33] has taken a step further in the vision domain by introducing promptability, enabling models to generalize to novel visual concepts through large-scale data training and continuous model refinement. In recent years, the development of foundation models in the 3D domain has started to take initial steps [13, 28, 55, 78, 83, 85]. Most existing 3D foundation models are often combined with vision-language models (VLMs) [13, 27, 55, 85], relying on point clouds as input to help the language models understand 3D [13, 85]. While these methods are valuable for scene understanding and semantic tasks, they do not directly provide precise 3D detection results. Moreover, point cloud inputs significantly restrict the use cases [72], as they are not always accessible in many practical scenarios. In contrast to these approaches, we aim to develop a foundation model specifically dedicated to 3D detection tasks with the most general inputs, monocular images. By leveraging the powerful priors from 2D vision foundation models, our approach enables the detection of any 3D object with arbitrary camera configurations, presenting a broad range of practical applications. + +# 3. Detect Anything 3D in the Wild + +# 3.1. 
Overview
+
+As illustrated in Figure 2(a), DetAny3D takes a monocular RGB image and prompts (e.g., boxes, points, text, intrinsic) as input. The box, point, and text prompts are used to specify objects, while the intrinsic prompts are optional. When not provided, the model predicts the intrinsic parameters and the corresponding 3D detection results. If intrinsics are available, the model can leverage them as geometric constraints to mitigate the ill-posed nature of monocular depth estimation and calibrate its detection results.
+
+Specifically, the monocular image is embedded in parallel by two foundational models: SAM [33] for low-level pixel information, underpins the entire promptable architecture. And depth-pretrained DINO [51, 54], which provides rich high-level geometric knowledge, excels in depth-related tasks. These complementary 2D features are then fused through our proposed 2D Aggregator (see Figure 2(b)), which hierarchically aligns low-level and high-level information using cross-attention layers. The fused features are subsequently passed to the Depth/Camera Module, which extracts the camera and camera-aware depth embedding, collectively referred to as geometric embedding.
+
+The geometric embedding and the 3D bounding box tokens with encoded prompt tokens are then fed into the 3D Interpreter (see Figure 2(c)), which employs a structure similar to the SAM decoder along with a specialized Zero-Embedding Mapping (ZEM) mechanism. 3D Interpreter injects 3D geometric features while ensuring stable 2D-to
+
+![](images/91f911c538bbec2983b29ce2fd15f511826695ab2d50c383e182329360bb8173.jpg)
+
+![](images/4568c2ee3c1179d81c0e12e63e2d6ab2b294a4bad4d770fcf97dfd8956d5356c.jpg)
+Figure 2. Overview of DetAny3D. It supports arbitrary monocular images as input and performs 3D object detection driven by prompts—box, point, and text to specify target objects and optional camera calibration to calibrate geometric projections. 
DetAny3D comprises two key modules: (b) 2D Aggregator, which employs a hierarchical cross-attention mechanism to dynamically fuse knowledge from SAM and DINO, with a learnable gate controlling each component's contribution to the geometric embedding; (c) 3D Interpreter, which introduces a Zero-Embedding Mapping (ZEM) strategy based on zero-initialized layers to gradually inject geometric priors, thereby enabling zero-shot 3D grounding and avoiding catastrophic forgetting during knowledge transfer.
+
+3D knowledge transfer, enabling progressive 3D grounding across diverse data domains. Finally, the model predicts 3D boxes based on the hidden states of the 3D box tokens. Our DetAny3D is trained on selected seen classes and can detect any unseen classes in a zero-shot manner.
+
+# 3.2. 2D Aggregator
+
+To effectively fuse multiple foundation models, we propose 2D Aggregator to aggregate features from SAM and DINO, mitigating potential conflicts between their heterogeneous representations. As illustrated in Figure 2(b), the 2D Aggregator fuses features from SAM and DINO in a hierarchical manner, progressively integrating spatial and geometric information across four cascaded alignment units.
+
+Feature Extraction. Given an input image, the SAM encoder extracts high-resolution spatial features $\mathbf{F}_s\in$ $\mathbb{R}^{H_s\times W_s\times C}$ , capturing fine-grained details and boundaries. Simultaneously, the DINO encoder outputs geometry-aware embeddings $\mathbf{F}_d\in \mathbb{R}^{H_d\times W_d\times C}$ , which is depth-pretrained by UniDepth [54] and provides robust priors for depth and intrinsics. Following the design of ViT Adapter [16], we also employ a convolutional structure to produce preliminary image features, denoted as $\mathbf{F}_q^0$ , serving as the initial query for subsequent attention-based fusion.
+
+Hierarchical Fusion. Each of the four alignment units fuses SAM and DINO features via cross-attention. 
In the $i$ -th unit, we first apply learnable gating weights $\alpha_{i}$ (initial + +ized to 0.5) to combine the $i$ -th block of SAM features $\mathbf{F}_s^i$ and DINO features $\mathbf{F}_d^i$ as follows: + +$$ +\mathbf {F} _ {\text {f u s e d}} ^ {i} = \alpha_ {i} \cdot \mathbf {F} _ {s} ^ {i} + (1 - \alpha_ {i}) \cdot \mathbf {F} _ {d} ^ {i}. \tag {1} +$$ + +We use $\mathbf{F}_{\mathrm{fused}}^{i}$ as key and value, while the query feature $\mathbf{F}_q^{i - 1}$ acts as the query in the cross-attention mechanism: + +$$ +\mathbf {F} _ {q} ^ {i} = \operatorname {C r o s s A t t n} \left(\mathbf {F} _ {q} ^ {i - 1}, \mathbf {F} _ {\text {f u s e d}} ^ {i}, \mathbf {F} _ {\text {f u s e d}} ^ {i}\right), \tag {2} +$$ + +$$ +\hat {\mathbf {F}} _ {\text {f u s e d}} ^ {i} = \operatorname {N o r m} \left(\mathbf {F} _ {\text {f u s e d}} ^ {i} + \mathbf {F} _ {q} ^ {i}\right). \tag {3} +$$ + +This design enables the model to dynamically emphasize SAM's spatial details or DINO's semantic and geometric cues at different hierarchy levels while minimizing interference between the two representations. + +Geometric Embeddings. The fused features $\hat{\mathbf{F}}_{\mathrm{fused}}^i$ , $i \in [1,2,3,4]$ , are subsequently processed by the depth and camera modules, following the UniDepth [54] architecture. Specifically, these modules predict the camera embedding $\mathbf{C}$ and camera-aware depth embedding $\mathbf{D}|\mathbf{C}$ , referred as the geometric embedding $\mathbf{G} = \{\mathbf{D}|\mathbf{C}, \mathbf{C}\}$ . These modules provide aligned depth and camera parameters under the monocular depth ill-posed problem. Further details can be found in the Supplementary material Section 7.1. + +Overall, by progressively aligning multi-scale features and adaptively integrating their contributions, 2D Aggregator effectively leverages the strengths of both foundation models while minimizing potential conflicts. + +# 3.3. 
3D Interpreter + +The diverse 3D object supervisions across various scenarios, depths, and camera intrinsics introduce challenges to model training. Our 3D Interpreter aims to progressively integrate geometric information while ensuring stable 2D-to-3D knowledge transfer. We introduce Zero-Embedding Mapping (ZEM) mechanism, which incrementally infuses 3D geometry into the decoder via zero-initialized layers—without disrupting the original 2D features. As Figure 2(c) shows, the 3D Interpreter comprises three main components: the Two-Way Transformer, the Geometric Transformer, and the 3D bounding box heads. + +Two-Way Transformer. Following the SAM design, we first concatenate the 3D bounding box tokens with prompt-related tokens to form the query: + +$$ +\mathbf {Q} = \left[ \left[ \mathbf {T} _ {\mathrm {3 D}, 1}; \mathbf {T} _ {\mathrm {p}, 1} \right], \dots , \left[ \mathbf {T} _ {\mathrm {3 D}, N}; \mathbf {T} _ {\mathrm {p}, N} \right] \right], \tag {4} +$$ + +where $\mathbf{T}_{3\mathrm{D},i}$ denotes the 3D bounding box token for the $i$ -th object, $\mathbf{T}_{\mathrm{p},i}$ is the prompt-related token, and $[\cdot ;\cdot ]$ denotes vector concatenation. The SAM encoder output $\mathbf{F}_s$ serves as both key and value for the first Two-Way Transformer layer, yielding: + +$$ +\mathbf {F} _ {s} ^ {\prime} = \text {T w o W a y T r a n s} (\mathbf {Q}, \mathbf {F} _ {s}, \mathbf {F} _ {s}). \tag {5} +$$ + +The initialized parameters of two-way transformer are copied using pre-trained SAM decoder. + +Geometric Transformer. We then process the geometric embedding $\mathbf{G}$ (from the 2D Aggregator) through the zero-initialized $1 \times 1$ convolutional layer ZEM and add it to $\mathbf{F}_s$ for use as key and value in the Geometric Transformer: + +$$ +\mathbf {G} ^ {\prime} = \operatorname {G e o T r a n s} (\mathbf {Q}, \operatorname {Z E M} (\mathbf {G}) + \mathbf {F} _ {s}, \operatorname {Z E M} (\mathbf {G}) + \mathbf {F} _ {s}). 
\tag {6} +$$ + +ZEM integrates the geometric embedding and avoids catastrophic forgetting in 2D features. Next, $\mathbf{G}'$ is again passed through ZEM and combined with $\mathbf{F}_s'$ . This enriched representation is used as key and value in the second Two-Way Transformer layer to generate object features $\mathbf{O}$ : + +$$ +\mathbf {O} = \text {T w o W a y T r a n s} \left(\mathbf {Q} ^ {\prime}, \operatorname {Z E M} \left(\mathbf {G} ^ {\prime}\right) + \mathbf {F} _ {s} ^ {\prime}, \operatorname {Z E M} \left(\mathbf {G} ^ {\prime}\right) + \mathbf {F} _ {s} ^ {\prime}\right). \tag {7} +$$ + +ZEM also helps stabilize parameter updates in the two-way and geometric transformer training, preventing conflicts arising from diverse 3D object supervision. + +3D Bounding Box Heads. Finally, $\mathbf{O}$ is fed into the 3D bounding box heads to calculate the final predictions, which follows typical architectures from standard 3D detection frameworks [8, 66, 80]: $B_{\mathrm{3D}}(x,y,z,w,h,l,R,S)$ where $x,y,z$ specify the 3D box center, $w,h,l$ are its dimensions, $R$ is the rotation matrix, and $S$ is the predicted 3D Intersection over Union (IoU) score. + +# 3.4. Loss + +Our loss function comprises three components, the depth loss $\mathcal{L}_{\mathrm{depth}}$ , the camera intrinsic loss $\mathcal{L}_{\mathrm{cam}}$ , and the detection loss $\mathcal{L}_{\mathrm{det}}$ . The overall loss is defined as the sum of these three components. For depth loss $\mathcal{L}_{\mathrm{depth}}$ , we adopt the commonly used SILog loss [19, 64] to supervise depth prediction. For camera intrinsic loss $\mathcal{L}_{\mathrm{cam}}$ , we follow the dense camera ray approach [30, 54] to represent intrinsics and also employ the SILog loss to measure deviations between predicted and ground-truth parameters. 
At last, for detection loss $\mathcal{L}_{\mathrm{det}}$ , we use the smooth L1 loss [40, 66, 80] to regress 3D bounding boxes parameters and predicted IOU scores and the Chamfer loss [8, 74] for rotation matrices. Detailed formulations of these loss functions can be found in the supplementary material Section 7.3. + +# 3.5. Prompt Interaction + +DetAny3D supports point, box, and text prompts to detect 3D box for user-specified objects. To calibrate more precise depth for specific camera, DetAny3D allows users to specify the camera configuration via the intrinsic prompt. + +Box and Point Prompts. Following SAM's methodology, both box and point prompts are encoded based on their respective positions and embeddings. For the box prompt, two points (top-left and bottom-right corners) are used. The point prompt is derived by combining the positional encoding of the point and the corresponding embedding. + +Text Prompts. Recent 2D foundation models like Grounding DINO [44] are able to detect bounding box for the open-vocabulary object specified by users using text prompt. DetAny3D can further generate 3D bounding box using the prediction of Grounding DINO, which enables text as prompts in the zero-shot interface. + +Intrinsic Prompts. Unlike most existing 3D detectors that employ a fixed virtual camera and rely on GT intrinsics to recover the true depth, inspired by Unidepth, we predict intrinsics for camera-aware 3D detection. When no intrinsic prompt is given, the model infers intrinsics for outputs: + +$$ +\operatorname {B o x} _ {3 D} = 3 \text {D I n t e r p r e t o r} (\mathbf {Q}, \hat {\mathbf {G}}, \mathbf {F} _ {s}), \tag {8} +$$ + +where $\hat{\mathbf{G}} = \{\mathbf{D}|\hat{\mathbf{C}},\hat{\mathbf{C}}\}$ , $\hat{\mathbf{C}}$ is the predicted camera embedding, and $\mathbf{D}|\hat{\mathbf{C}}$ is the depth embedding conditioned on the predicted camera embedding. 
When intrinsic prompts are given, the model refines the 3D detection results based on the true intrinsic: + +$$ +\operatorname {B o x} _ {3 D} = 3 \mathrm {D I n t e r p r e t o r} (\mathbf {Q}, \mathbf {G}, \mathbf {F} _ {s}), \tag {9} +$$ + +where $\mathbf{G} = \{\mathbf{D}|\mathbf{C},\mathbf{C}\}$ . This boosts performance on both intrinsic prediction and 3D detection since the model continuously predicts and aligns the intrinsic with the 3D detection rather than estimating it solely from input image. + +# 4. Experiment + +# 4.1. Experimental Setup + +DA3D Benchmark. We present DA3D, a unified 3D detection dataset that aggregates 16 diverse datasets for 3D detection and depth estimation. Building upon Omni3D's original datasets (Hypersim [57], ARKitScenes [4], Objectron [2], SUNRGBD [61], KITTI [24], and nuScenes [9]), we incorporate additional four outdoor detection datasets (Argoverse2 [70], A2D2 [25], Waymo [62], Cityscapes3D [21]), one indoor detection dataset (3RScan [65]), and five depth and intrinsic datasets (Scannet [17], Taskonomy [77], DrivingStereo [71], Middlebury [59], IBIMS-1 [34]). All data is standardized with monocular images, camera intrinsics, 3D bounding boxes, and depth maps. Following prior work [74], we select partial categories from KITTI, SUNRGBD, and ARKitScenes as zero-shot test classes. We select Cityscapes3D, Waymo, and 3RScan as our zero-shot datasets with novel camera configurations, where 3RScan also contains novel object categories. Depth supervision from LiDAR, RGB-D, and stereo sensors enhances $75\%$ of training samples, while intrinsic parameters cover 20 camera configurations across 0.4 million frames $(2.5\times$ Omni3D's scale). Dataset statistics and splits are detailed in Supplementary material Section 6. All data are subject to their respective licenses. + +Baselines. 
We choose Cube R-CNN [8] and OV-Mono3D [74] as our primary baselines, as their settings align most closely with our experimental protocol: Cube R-CNN is a benchmark provided by the Omni3D dataset. It is a unified detector capable of performing detection on predefined categories. OVMono3D is a recently available open-vocabulary 3D detector on the Omni3D dataset. It lifts 2D detection to 3D by connecting the open-vocabulary 2D detector Grounding DINO [44] with a detection head. + +Metrics. We adopt the metrics in the Omni3D benchmark [8], which is Average Precision (AP). Predictions are matched to ground-truth by measuring their overlap using IoU3D, which computes the intersection-over-union (IoU) of 3D cuboids. The IoU3D thresholds range from $\tau \in [0.05, 0.10, \dots, 0.50]$ . For experiments using text prompts, we additionally employ target-aware metrics from OVMono3D [74]: Prompt the detector only with category names present in the per-image annotations instead of providing an exhaustive category list. This addresses severe naming ambiguity (e.g., "trash can" vs. "rubbish bin") and missing annotation issues prevalent in indoor datasets like 3RScan (see Supplementary material Section 8.). + +Implementation Details. We implement DetAny3D via PyTorch [53]. We use the pretrained ViT-L DINOv2 [51, 54] and ViT-H SAM [33] as our initial models, with SAM serving as the promptable backbone, where the encoder is frozen during training. All main experiments are conducted + +using 8 NVIDIA A100 machines with 8 GPUs for each and a batch size of 64. The model is trained for 80 epochs, taking approximately 2 weeks to complete. The training uses the AdamW [47] optimizer with an initial learning rate of 0.0001, adjusted according to the cosine annealing policy [46]. During box prompt training, we apply a 0.1 positional offset disturbance. For point prompt training, points are randomly selected from the mask. 
Text prompts are converted into box prompts via Grounding DINO SwinT [44]. For fair comparisons, all baseline-related experiments incorporate intrinsic prompts and use aligned prompt inputs. + +# 4.2. Main Results + +Zero-shot Category Performance. In this experiment, we use two sources for the prompt input: text prompt processed by Grounding DINO and box prompt from ground-truth 2D bounding box. We evaluate our model on KITTI, SUNRGBD, and ARKitScenes datasets with the same zero-shot categories as OVMono3D [74]. As shown in Table 1 (left), our DetAny3D demonstrates superior zero-shot adaptation performance compared to the OVMono3D baseline. When using Grounding DINO for text prompt input, our method achieves significant improvements of $21.02\mathrm{AP}_{3\mathrm{D}}$ on KITTI, $4.29\mathrm{AP}_{3\mathrm{D}}$ on SUNRGBD, and $11.35\mathrm{AP}_{3\mathrm{D}}$ on ARKitScenes under the target-aware metric. When using 2D ground-truth as box prompt input, DetAny3D attains $28.96\mathrm{AP}_{3\mathrm{D}}$ on KITTI, $39.09\mathrm{AP}_{3\mathrm{D}}$ on SUNRGBD, and $57.72\mathrm{AP}_{3\mathrm{D}}$ on ARKitScenes, showing $3.4\times$ , $2.3\times$ , and $4.1\times$ gains over the baseline, respectively. This substantial performance gap highlights our method's enhanced ability to generalize to novel object categories. + +Zero-shot Camera Performance. To assess robustness against novel camera parameters, we conduct cross-dataset evaluation as shown in Table 1 (right). For Cityscapes3D and Waymo, We use Cube R-CNN's 2D detections and ground-truth as box prompt and Grounding DINO processed text prompt for comparison. For 3RScan, due to namespace inconsistency with Cube R-CNN's predefined categories and the presence of novel classes, we only use text prompt and ground-truth box prompts, benchmarking against OVMono3D. DetAny3D exhibits strong adaptation to unseen camera configurations. 
When using Cube R-CNN-aligned prompts, our model achieves $\mathrm{AP}_{3\mathrm{D}}$ scores of 10.33 and 15.17 on Cityscapes3D and Waymo, respectively, surpassing Cube R-CNN by +2.11 and +5.74. With text prompts, under identical settings as OVMono3D [74], our method improves $\mathrm{AP}_{3\mathrm{D}}$ by +4.73 on Cityscapes3D, +5.68 on Waymo, and +1.1 on 3RScan under target-aware metrics. Both models show low scores on conventional metrics for 3RScan due to severe naming ambiguity and missing annotations. Using 2D ground-truth as box prompts, DetAny3D attains $\mathrm{AP}_{3\mathrm{D}}$ of 16.88, 15.83, and 21.36 across the three datasets, outperforming OVMono3D by +6.82, +5.6, + +Table 1. Zero-shot 3D detection performance comparison on novel categories (left) and novel cameras (right). Results report $\mathrm{AP}_{\mathrm{3D}}$ with different prompt strategies: (1) Cube R-CNN, (2) Grounding DINO outputs (traditional metric / target-aware metric) and (3) Ground Truth. Target-aware metric uses per-image existing categories for prompting. + +
PromptMethodNovel CategoriesNovel Cameras
APkit3DAPsun3DAPpark3DAPcity3DAPwym3DAP3rs3D
-Cube R-CNN [8]---8.229.43-
Cube R-CNNOVMono3D [74]---4.9710.89-
DetAny3D (ours)---10.3315.17-
Δ---+5.36+4.28-
Grounding DINOOVMono3D [74]4.71 / 4.714.07 / 16.7813.21 / 13.215.88 / 10.989.20 / 10.270.37 / 8.48
DetAny3D (ours)25.73 / 25.737.63 / 21.0724.56 / 24.5611.05 / 15.7115.38 / 15.950.65 / 9.58
Δ+21.02 / +21.02+3.56 / +4.29+11.35 / +11.35+5.17 / +4.73+6.18 / +5.68+0.28 / +1.10
Ground TruthOVMono3D [74]8.4417.1614.1210.0610.2318.05
DetAny3D (ours)28.9639.0957.7216.8815.8321.36
Δ+20.52+21.93+43.60+6.82+5.60+3.31
+ +Table 2. In-domain performance comparison between DetAny3D and baselines. The first three columns show results trained only on NuScenes and KITTI, while the next seven columns show results trained on the unified dataset. Two prompt sources are used: (1) Cube R-CNN 2D detections, (2) Ground Truth. + +
MethodOmni3D_OUTOMni3D
APkit3D↑APnus3D↑APout3D↑APkit3D↑APnus3D↑APsun3D↑APark3D↑APobj3D↑APhyp3D↑AP3D↑
ImVoxelNet [58]23.523.421.5------9.4
SMOKE [45]25.920.420.0------10.4
OV-Uni3DETR [68]35.133.031.6-------
Cube R-CNN [8]36.032.731.932.5030.0615.3341.7350.847.4823.26
OVMono3D [74]w/Cube RCNN---25.4524.3315.2041.6058.877.7522.98
DetAny3D (ours)w/Cube RCNN35.833.932.231.6130.9718.9646.1354.427.1724.92
OVMono3D [74]w/Ground Truth---33.6923.7927.8340.8556.6411.9925.32
DetAny3D (ours)w/Ground Truth38.036.735.938.6837.5546.1450.6256.8215.9834.38
+ +and $+3.31$ , respectively. These results highlight the effectiveness of our architecture and its potential for real-world applications with arbitrary camera configurations. + +In-domain Performance We also evaluate our model's in-domain detection capability using two prompt sources: 2D detections from Cube R-CNN and 2D ground-truth. Besides the unified model, we also train DetAny3D on Omni3D_out for comparison. As shown in Table 2, DetAny3D achieves competitive results with Cube R-CNN when provided with aligned input. Using GT prompts, DetAny3D outperforms OVMono3D by $9.06\mathrm{AP}_{3\mathrm{D}}$ , indicating that Cube R-CNN may bottleneck performance, and stronger 2D prompts could further boost results. + +# 4.3. Possible Applications of DetAny3D + +Other than robustly detecting diverse corner cases in real-world tasks such as autonomous driving and embodied perception, DetAny3D's open-world detection results can further serve as inputs for advanced downstream tasks. + +3D Bounding Box Guided Video Generation. We feed DetAny3D outputs into Sora for zero-shot, open-world 3D box guided video generation. As shown in Figure 3, we compare: (i) image + 3D box + text, (ii) image + 2D box + + +![](images/ed02c5299eea3f9d7fbc6b3c62c743a443009cb116b41fb478d5e440aac3c5ac.jpg) +Figure 3. Zero-Shot Transfer Video Generation via Sora. We provide Sora with Internet-sourced images. As shown, when controlled with 3D bounding box, Sora can better capture the scene's geometric relationships. In contrast, with only controlled by 2D bounding box prompt, Sora respects pixel-level spatial cues but fails to generate accurate geometric offset. 
+ +![](images/6a784baaa6ef36e1634481d27759d2e17f84374a49fb217170a3a5e045d2c4ef.jpg) + +![](images/8e233f369a30d7442cf51e6289fad6a16356f81be236d69265f5e593d7e2a865.jpg) + +![](images/84b06d4981e3cbde7098dfcd8a1289b9dea2d415eaa35bc9e8fb96624d9892b4.jpg) + +![](images/163cf4992709a31605b860aa94bd7ad353ee5991161c95956f4b52deca9d5aaa.jpg) +Figure 4. Qualitative Results. We present qualitative examples from open-world detection. In each pair of images, the top row is produced by OVMono3D, and the bottom row by DetAny3D. For each example, the left sub-figure overlays the projected 3D bounding boxes, while the right sub-figure shows the corresponding bird's-eye view with $1\mathrm{m} \times 1\mathrm{m}$ grids as the background. + +Table 3. Ablation study of DetAny3D. The table shows the impact of different design choices on $\mathrm{AP}_{\mathrm{3D}}$ performance. Each component is progressively added. To save resources, ablations are conducted on $10\%$ of the full training dataset. + +
Depth&Cam.Merge DINO2D Agg.ZEMAP3D ↑
----5.81
---10.10
--20.20
-23.21
25.80
+ +text, and (iii) image + text. With 3D box constraints, Sora generates videos better aligned with intent. + +# 4.4. Ablation Studies + +As shown in Table 3, we ablate key components of DetAny3D, showing the evolution from a SAM-based baseline to DetAny3D with strong 3D generalization. The base model extends SAM with 3D box tokens and a 3D head for direct box prediction. Additional ablations, including backbone and prompt types, are in Supplementary Section 9. + +- Effectiveness of Depth & Camera Modules. Depth map provides denser supervision, while camera configuration intrinsic help mitigate disruptions caused by multiple datasets training. Integrating both depth map and camera intrinsic yields improvement in 3D feature extraction and generalization across diverse datasets. +- Effectiveness of Merging Depth-Pretrained DINO. Incorporating depth-pretrained DINO yields remarkable improvements, demonstrating that the rich geometric in + +formation from DINO effectively compensates for SAM's limited geometric understanding. + +- Effectiveness of 2D Aggregator. Compared to directly adding the features from two models, the 2D Aggregator reduces conflicts between different foundation models, further unleashing the performance gains from two foundation model integration. +- Effectiveness of ZEM. ZEM mechanism integrates geometric features through zero-initialized layers, which enables stable 2D-to-3D knowledge transfer during training across datasets with diverse camera parameters, scenes, and depth distributions. + +# 4.5. Qualitative Results + +We provide qualitative comparisons with OVMono3D. As shown in Figure 4, our model predicts more accurate intrinsics when the camera parameters are unknown and infers more consistent camera poses and 3D detections. + +# 5. Conclusions + +We propose DetAny3D, a promptable 3D detection foundation model that can detect arbitrary 3D objects from any monocular image input. 
DetAny3D exhibits significant zero-shot detection capabilities across diverse domains and effective zero-shot transfer across various tasks, highlighting its suitability for real-world deployment in dynamic and unstructured environments. Moreover, its flexible and robust detection ability opens the door to gathering large-scale, multi-source data for more 3D perception-guided tasks, paving the way toward open-world systems. + +# Acknowledgements + +We sincerely thank Jiazhi Yang, Tianyu Li, Haochen Tian, Jisong Cai, and Li Chen for their invaluable discussions and constructive feedback throughout this project. Their insights and expertise have contributed significantly to the success of this work. We also appreciate the continuous support and encouragement from all the members of OpenDriveLab. This work is supported by the National Key Research and Development Program of China (2024YFE0210700), the National Natural Science Foundation of China (NSFC) under Grants 62206172 and 62432008, and the Shanghai Artificial Intelligence Laboratory. It is also partially funded by Meituan Inc. + +# References + +[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 3 +[2] Adel Ahmadyan, Liangkai Zhang, Artsiom Ablavatski, Jianing Wei, and Matthias Grundmann. Objectron: A large scale dataset of object-centric videos in the wild with pose annotations. In CVPR, 2021. 6, 13 +[3] Umar Asif, Jianbin Tang, and Stefan Harrer. Graspnet: An efficient convolutional neural network for real-time grasp detection for low-powered devices. In IJCAI, 2018. 2 +[4] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Yuri Feigin, Peter Fu, Thomas Gebauer, Daniel Kurz, Tal Dimry, Brandon Joffe, Arik Schwartz, et al. Arkitsscenes: A diverse real-world dataset for 3d indoor scene understanding using mobile rgb-d data. 
In NeurIPS Datasets, 2021. 6, 13 +[5] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024.3 +[6] Georg Biegelbauer and Markus Vincze. Efficient 3d object detection by fitting superquadrics to range image data for robot's object manipulation. In ICRA, 2007. 2 +[7] Aleksei Bochkovskii, Amaël Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv preprint arXiv:2410.02073, 2024. 2 +[8] Garrick Brazil, Abhinav Kumar, Julian Straub, Nikhila Ravi, Justin Johnson, and Georgia Gkioxari. Omni3d: A large benchmark and model for 3d object detection in the wild. In CVPR, 2023. 2, 3, 5, 6, 7, 13 +[9] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In CVPR, 2020. 3, 6, 13 + +[10] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In ICCV, 2021. 2 +[11] Sergio Casas, Abbas Sadat, and Raquel Urtasun. Mp3: A unified model to map, perceive, predict and plan. In CVPR, 2021. 2 +[12] Li Chen, Penghao Wu, Kashyap Chitta, Bernhard Jaeger, Andreas Geiger, and Hongyang Li. End-to-end autonomous driving: Challenges and frontiers. IEEE TPAMI, 2024. 2 +[13] Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In CVPR, 2024. 3 +[14] Xiaozhi Chen, Kaustav Kundu, Ziyu Zhang, Huimin Ma, Sanja Fidler, and Raquel Urtasun. Monocular 3d object detection for autonomous driving. In CVPR, 2016. 
2, 3 +[15] Xiaozhi Chen, Huimin Ma, Ji Wan, Bo Li, and Tian Xia. Multi-view 3d object detection network for autonomous driving. In CVPR, 2017. 2 +[16] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. In ICLR, 2023. 4 +[17] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In CVPR, 2017. 6 +[18] Saumitro Dasgupta, Kuan Fang, Kevin Chen, and Silvio Savarese. Delay: Robust spatial layout estimation for cluttered indoor scenes. In CVPR, 2016. 3 +[19] David Eigen, Christian Puhrsch, and Rob Fergus. Depth map prediction from a single image using a multi-scale deep network. In NeurIPS, 2014. 5 +[20] Hao-Shu Fang, Chenxi Wang, Minghao Gou, and Cewu Lu. Graspnet-1billion: A large-scale benchmark for general object grasping. In CVPR, 2020. 2 +[21] Nils Gählert, Nicolas Jourdan, Marius Cordts, Uwe Franke, and Joachim Denzler. Cityscapes 3d: Dataset and benchmark for 9 dof vehicle detection. arXiv preprint arXiv:2006.07864, 2020. 6 +[22] Ruiyuan Gao, Kai Chen, Enze Xie, HONG Lanqing, Zhenguo Li, Dit-Yan Yeung, and Qiang Xu. Magicdrive: Street view generation with diverse 3d geometry control. In ICLR, 2023. 2 +[23] Ruiyuan Gao, Kai Chen, Zhihao Li, Lanqing Hong, Zhenguo Li, and Qiang Xu. Magicdrive3d: Controllable 3d generation for any-view rendering in street scenes. arXiv preprint arXiv:2405.14475, 2024. 2 +[24] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. *IJRR*, 2013. 3, 6, 13 +[25] Jakob Geyer, Yohannes Kassahun, Mentor Mahmudi, Xavier Ricou, Rupesh Durgesh, Andrew S Chung, Lorenz Hauswald, Viet Hoang Pham, Maximilian Mühlegg, Sebastian Dorn, et al. A2d2: Audi autonomous driving dataset. arXiv preprint arXiv:2004.06320, 2020. 
6 + +[26] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024. 3 +[27] Ziyu Guo*, Renrui Zhang*, Xiangyang Zhu, Yiwen Tang, Xianzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023. 3 +[28] Ziyu Guo*, Renrui Zhang*#, Xiangyang Zhu, Chengzhuo Tong, Peng Gao, Chunyuan Li, and Pheng-Ann Heng. Sam2point: Segment any 3d as videos in zero-shot and promptable manners. arXiv preprint arXiv:2408.16768, 2024.3 +[29] Ziyu Guo, Renrui Zhang, Chengzhuo Tong, Zhizheng Zhao, Peng Gao, Hongsheng Li, and Pheng-Ann Heng. Can we generate images with cot? let's verify and reinforce image generation step by step. arXiv preprint arXiv:2501.13926, 2025.3 +[30] Xiankang He, Guangkai Xu, Bo Zhang, Hao Chen, Ying Cui, and Dongyan Guo. Diffcalib: Reformulating monocular camera calibration as diffusion-based dense incident map generation. arXiv preprint arXiv: 2405.15619, 2024. 5 +[31] Yihan Hu, Jiazhi Yang, Li Chen, Keyu Li, Chonghao Sima, Xizhou Zhu, Siqi Chai, Senyao Du, Tianwei Lin, Wenhai Wang, et al. Planning-oriented autonomous driving. In CVPR, 2023. 2 +[32] Jin-Cheng Jhang, Tao Tu, Fu-En Wang, Ke Zhang, Min Sun, and Cheng-Hao Kuo. V-mind: Building versatile monocular indoor 3d detector with diverse 2d annotations. In WACV, 2025. 3 +[33] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In ICCV, 2023. 2, 3, 6 +[34] Tobias Koch, Lukas Liebel, Friedrich Fraundorfer, and Marco Korner. Evaluation of cnn-based single-image depth estimation methods. In ECCVW, 2018. 
6 +[35] Maksim Kolodiazhnyi, Anna Vorontsova, Matvey Skripkin, Danila Rukhovich, and Anton Konushin. Unidet3d: Multi-dataset indoor 3d object detection. arXiv preprint arXiv:2409.04234, 2024. 2 +[36] Buyu Li, Wanli Ouyang, Lu Sheng, Xingyu Zeng, and Xiaogang Wang. Gs3d: An efficient 3d object detection framework for autonomous driving. In CVPR, 2019. 2 +[37] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In CVPR, 2022. 3 +[38] Xiaofan Li, Yifu Zhang, and Xiaoqing Ye. Drivingdiffusion: Layout-guided multi-view driving scenarios video generation with latent diffusion model. In European Conference on Computer Vision, 2024. 2 +[39] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Qiao Yu, and Jifeng Dai. Bevformer: + +learning bird's-eye-view representation from lidar-camera via spatiotemporal transformers. IEEE TPAMI, 2024. 3 +[40] Zhuoling Li, Xiaogang Xu, SerNam Lim, and Hengshuang Zhao. Unimode: Unified monocular 3d object detection. In CVPR, 2024. 2, 3, 5 +[41] Tingting Liang, Hongwei Xie, Kaicheng Yu, Zhongyu Xia, Zhiwei Lin, Yongtao Wang, Tao Tang, Bing Wang, and Zhi Tang. Bevfusion: A simple and robust lidar-camera fusion framework. In NeurIPS, 2022. 3 +[42] Xuewu Lin, Tianwei Lin, Zixiang Pei, Lichao Huang, and Zhizhong Su. Sparse4d: Multi-view 3d object detection with sparse spatial-temporal fusion. arXiv preprint arXiv:2211.10581, 2022. 3 +[43] Luyang Liu, Hongyu Li, and Marco Gruteser. Edge assisted real-time object detection for mobile augmented reality. In MobiCom, 2019. 2 +[44] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In ECCV, 2024. 2, 3, 5, 6 +[45] Zechen Liu, Zizhang Wu, and Roland Tóth. 
Smoke: Single-stage monocular 3d object detection via keypoint estimation. In CVPRW, 2020. 3, 7 +[46] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 6 +[47] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6 +[48] Xinzhu Ma, Wanli Ouyang, Andrea Simonelli, and Elisa Ricci. 3d object detection from images for autonomous driving: a survey. IEEE TPAMI, 2023. 2 +[49] Jiageng Mao, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. 3d object detection for autonomous driving: A comprehensive survey. IJCV, 2023. 2 +[50] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In CVPR, 2020. 2 +[51] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. TMLR, 2024. 2, 3, 6 +[52] Youngmin Park, Vincent Lepetit, and Woontack Woo. Multiple 3d object tracking for augmented reality. In ISMAR, 2008. 2 +[53] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In NeurIPS, 2019. 6 +[54] Luigi Piccinelli, Yung-Hsu Yang, Christos Sakaridis, Mattia Segu, Siyuan Li, Luc Van Gool, and Fisher Yu. Unidepth: Universal monocular metric depth estimation. In CVPR, 2024. 2, 3, 4, 5, 6, 14 + +[55] Zhangyang Qi, Zhixiong Zhang, Ye Fang, Jiaqi Wang, and Hengshuang Zhao. Gpt4scene: Understand 3d scenes from videos with vision-language models. arXiv preprint arXiv:2501.01428, 2025. 
3 +[56] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 2, 3 +[57] Mike Roberts, Jason Ramapuram, Anurag Ranjan, Atulit Kumar, Miguel Angel Bautista, Nathan Paczan, Russ Webb, and Joshua M Susskind. Hypersim: A photorealistic synthetic dataset for holistic indoor scene understanding. In ICCV, 2021. 6, 13 +[58] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In WACV, 2022. 7 +[59] Daniel Scharstein and Richard Szeliski. A taxonomy and evaluation of dense two-frame stereo correspondence algorithms. IJCV, 2002. 6 +[60] Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens Beiwenger, Ping Luo, Andreas Geiger, and Hongyang Li. Drivelm: Driving with graph visual question answering. In ECCV, 2024. 3 +[61] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In CVPR, 2015. 6, 13 +[62] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In CVPR, 2020. 6 +[63] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 3 +[64] Benjamin Ummenhofer, Huizhong Zhou, Jonas Uhrig, Nikolaus Mayer, Eddy Ilg, Alexey Dosovitskiy, and Thomas Brox. Demon: Depth and motion network for learning monocular stereo. In CVPR, 2017. 5 +[65] Johanna Wald, Armen Avetisyan, Nassir Navab, Federico Tombari, and Matthias Nießner. 
Rio: 3d object instance re-localization in changing indoor environments. In ICCV, 2019. 6, 15 +[66] Tai Wang, Xinge Zhu, Jiangmiao Pang, and Dahua Lin. Fcos3d: Fully convolutional one-stage monocular 3d object detection. In ICCV, 2021. 3, 5 +[67] Tai Wang, Xiaohan Mao, Chenming Zhu, Runsen Xu, Ruiyuan Lyu, Peisen Li, Xiao Chen, Wenwei Zhang, Kai Chen, Tianfan Xue, et al. Embodiedscan: A holistic multimodal 3d perception suite towards embodied ai. In CVPR, 2024. 2 +[68] Zhenyu Wang, Ya-Li Li, Xi Chen, Hengshuang Zhao, and Shengjin Wang. Uni3detr: Unified 3d detection transformer. In NeurIPS, 2023. 2, 7 + +[69] Zhenyu Wang, Yali Li, Taichi Liu, Hengshuang Zhao, and Shengjin Wang. Ov-uni3detr: Towards unified open-vocabulary 3d object detection via cycle-modality propagation. In ECCV, 2024. 3 +[70] Benjamin Wilson, William Qi, Tanmay Agarwal, John Lambert, Jagjeet Singh, Siddhesh Khandelwal, Bowen Pan, Ratnesh Kumar, Andrew Hartnett, Jhony Kaesemodel Pontes, et al. Argoverse 2: Next generation datasets for self-driving perception and forecasting. In NeurIPS Datasets, 2023. 6 +[71] Guorun Yang, Xiao Song, Chaoqin Huang, Zhidong Deng, Jianping Shi, and Bolei Zhou. Drivingstereo: A large-scale dataset for stereo matching in autonomous driving scenarios. In CVPR, 2019. 6 +[72] Jie Yang, Bingliang Li, Ailing Zeng, Lei Zhang, and Ruimao Zhang. Open-world human-object interaction detection via multi-modal prompts. In CVPR, 2024. 3 +[73] Xiuyu Yang, Yunze Man, Junkun Chen, and Yu-Xiong Wang. Scenecraft: Layout-guided 3d scene generation. In NeurIPS, 2025. 2 +[74] Jin Yao, Hao Gu, Xuweiyi Chen, Jiayun Wang, and Zezhou Cheng. Open vocabulary monocular 3d object detection. arXiv preprint arXiv:2411.16833, 2024. 2, 3, 5, 6, 7, 13, 15 +[75] Kaixin Yao, Longwen Zhang, Xinhao Yan, Yan Zeng, Qixuan Zhang, Lan Xu, Wei Yang, Jiayuan Gu, and Jingyi Yu. Cast: Component-aligned 3d scene reconstruction from anrgb image. arXiv preprint arXiv:2502.12894, 2025. 
2 +[76] Wei Yin, Chi Zhang, Hao Chen, Zhipeng Cai, Gang Yu, Kaixuan Wang, Xiaozhi Chen, and Chunhua Shen. Metric3d: Towards zero-shot metric 3d prediction from a single image. In ICCV, 2023. 2 +[77] Amir R Zamir, Alexander Sax, William Shen, Leonidas J Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In CVPR, 2018. 6 +[78] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In CVPR, 2022. 3 +[79] Renrui Zhang, Zhengkai Jiang, Ziyu Guo, Shilin Yan, Junting Pan, Hao Dong, Peng Gao, and Hongsheng Li. Personalize segment anything model with one shot. *ICLR*, 2023. 3 +[80] Renrui Zhang, Han Qiu, Tai Wang, Ziyu Guo, Ziteng Cui, Yu Qiao, Hongsheng Li, and Peng Gao. Monodetr: Depth-guided transformer for monocular 3d object detection. In ICCV, 2023. 3, 5 +[81] Renrui Zhang, Jiaming Han, Chris Liu, Aojun Zhou, Pan Lu, Yu Qiao, Hongsheng Li, and Peng Gao. Llama-adapter: Efficient fine-tuning of large language models with zero-initialized attention. In ICLR, 2024. 3 +[82] Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Ziyu Guo, Shicheng Li, Yichi Zhang, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, et al. Mavis: Mathematical visual instruction tuning with an automatic data engine. arXiv preprint arXiv:2407.08739, 2024. 3 +[83] Haoyi Zhu, Honghui Yang, Xiaoyang Wu, Di Huang, Sha Zhang, Xianglong He, Hengshuang Zhao, Chunhua Shen, Yu + +Qiao, Tong He, et al. Ponderv2: Pave the way for 3d foundation model with a universal pre-training paradigm. arXiv preprint arXiv:2310.08586, 2023. 3 +[84] Menglong Zhu, Konstantinos G Derpanis, Yinfei Yang, Samarth Brahmbhatt, Mabel Zhang, Cody Phillips, Matthieu Lecce, and Kostas Daniilidis. Single image 3d object detection and pose estimation for grasping. In ICRA, 2014. 2 +[85] Ziyu Zhu, Zhuofan Zhang, Xiaojian Ma, Xuesong Niu, Yixin Chen, Baoxiong Jia, Zhidong Deng, Siyuan Huang, and Qing Li. 
Unifying 3d vision-language understanding via promptable queries. In ECCV, 2024. 3 +[86] Yiming Zuo, Karhan Kayan, Maggie Wang, Kevin Jeon, Jia Deng, and Thomas L Griffiths. Towards foundation models for 3d vision: How close are we? arXiv preprint arXiv:2410.10799, 2024. 2 + +# Detect Anything 3D in the Wild Supplementary Material + +# 6. DA3D + +DA3D is a unified 3D detection dataset, consists of 16 diverse datasets. It builds upon six datasets in Omni3D—Hypersim [57], ARKitScenes [4], Objectron [2], SUNRGBD [61], KITTI [24], and nuScenes [9]—while partially incorporating an additional 10 datasets to further enhance the scale, diversity, and generalization capabilities of 3D detection models. As shown in Figure 5, DA3D comprises 0.4 million frames ( $2.5 \times$ the scale of Omni3D), spanning 20 distinct camera configurations. + +The dataset is standardized with the similar structure to Omni3D [8], including monocular RGB images, camera intrinsics, 3D bounding boxes, and depth maps. DA3D is designed to test 3D detection models across a wide variety of environments, camera configurations, and object categories, offering a more comprehensive evaluation setting. + +# 6.1. Dataset Composition + +We categorize the datasets in DA3D based on two aspects: + +Indoor vs. Outdoor. As shown in Figure 6 (left), DA3D expands both indoor and outdoor datasets compared to Omni3D. Additionally, the ratio of indoor to outdoor data in DA3D is more balanced than in Omni3D, ensuring a more representative distribution for models trained across diverse environments. + +Supervision Types. We also analyze DA3D in terms of the distribution of supervision types (See Figure 6 (right)): + +- $35\%$ data provides only depth supervision. +- $23\%$ data provide only 3D bounding box annotations. +- $42\%$ data contains both depth maps and 3D bounding boxes. +- Intrinsic parameters are available for all data. + +# 6.2. Dataset Splits. 
+ +For training and evaluation, we follow the dataset splitting strategy used in prior works [8]. Specifically: + +- We construct the training set by merging training subsets from the original datasets. +- We form the validation set by sampling from the original training data, ensuring balanced representation. +- We use the original validation sets of each dataset as the test set, allowing for direct comparison with previous benchmarks. + +This setup ensures fair evaluation and maintains consistency with existing benchmarks while assessing both indomain and zero-shot generalization capabilities. + +![](images/030cd2830111b994f5772d412ec9b32d8c117feabb525c49f2d8dc8a61fd4064.jpg) +Figure 5. The composition of the DA3D dataset. + +![](images/f51fa1c38c8264dda2a24ac091ec28f700980b6116fcae49b6d6257c519fa399.jpg) +Figure 6. The data distribution of the DA3D dataset. (left): the statistics of indoor and outdoor data. (right): the statistics of data with different supervision categories. + +# 6.3. Evaluation Setup + +DA3D is designed to evaluate zero-shot generalization in both novel object categories and novel camera configurations. We define two evaluation settings: + +Zero-Shot Categories. Following prior work [74], we select partial categories from KITTI, SUNRGBD, and ARKitScenes as unseen classes for zero-shot testing. + +# Zero-Shot Datasets. + +- We use Cityscapes3D, Waymo, and 3RScan as unseen datasets with novel camera configurations. +- Cityscapes3D & Waymo introduce new intrinsics and image styles, challenging models to generalize across different camera setups. +- 3RScan not only introduces novel camera setups, but also contains unseen object categories, making it useful for testing both category and camera generalization. + +![](images/21e1019616c40f2263d401db5f334715e72f37dfeb65f72de813273f4361ab13.jpg) +Figure 7. Detailed implementation of camera and depth module from UniDepth. + +# 7. Model Details + +# 7.1. 
Camera and Depth Module Details + +This section introduces how the camera module and depth module work, predicting intrinsic and camera-aware depth, also related feature. + +As show in Figure 7, the fused feature $\hat{\mathbf{F}}_{\mathrm{fused}}$ are input into the camera module, which uses a cross-attention mechanism and a to obtain the camera intrinsic parameters. These intrinsic parameters are then used to generate camera rays. The rays are defined as: + +$$ +(r _ {1}, r _ {2}, r _ {3}) = \mathbf {K} ^ {- 1} \left[ \begin{array}{l} u \\ v \\ 1 \end{array} \right] +$$ + +where $\mathbf{K}$ is the calibration matrix, $u$ and $v$ are the pixel coordinates, and 1 is a vector of ones. In this context, the homogeneous camera rays $(r_x,r_y)$ are derived from: + +$$ +\left( \begin{array}{c} r _ {1} \\ \hline r _ {3} \end{array} , \frac {r _ {2}}{r _ {3}}\right) +$$ + +This dense representation of the camera rays undergoes Laplace Spherical Harmonic Encoding (SHE) [54] to produce the embeddings $\mathbf{C}$ . These embeddings are then passed to the depth module using the cross-attention mechanism. + +The depth feature conditioned on the camera embeddings, is computed as: + +$$ +\mathbf {D} \mid \mathbf {C} = \operatorname {M L P} (\operatorname {C r o s s A t t n} (\mathbf {D}, \mathbf {C})) +$$ + +Subsequently, the depth feature is processed through an upsampling head to predict the final depth map. + +# 7.2.3D Box Head Details + +This section introduces the details of the 3D box head. After the query $\mathbf{Q}$ passes through the Geometric Transformer + +![](images/8830df15b8218b952f02e4a337c426522f4241f7686a07e520d9210153f77c60.jpg) +Figure 8. 3D Box head details. + +and Two-Way Transformer, the model outputs $\mathbf{O}$ . $\mathbf{O}$ contains outputs corresponding to both 3D-related hidden states $\mathbf{O}_{3D}$ and prompt hidden states $\mathbf{O}_p$ . We extract the 3D-related output $\mathbf{O}_{3D}$ for further processing. 
+ +Subsequently, $\mathbf{O}_{3\mathrm{D}}$ is passed through a series of prediction heads as shown in Figure 8. + +We then transform these predictions into the final 3D bounding box parameters and obtain the 3D bounding box $(x,y,z,w,h,l,R,S)$ for each detected object, where $(x,y,z)$ denotes the 3D center, $(w,h,l)$ represent the dimensions, and $(R,S)$ describe the rotation and predicted 3D IoU score. + +# 7.3. Loss Details + +Depth Loss. The depth module is supervised using the Scale-Invariant Logarithmic (SILog) loss, defined as: + +$$ +\mathcal {L} _ {\text {d e p t h}} = \sqrt {\frac {1}{N} \sum_ {i = 1} ^ {N} \Delta d _ {i} ^ {2} - 0 . 1 5 \cdot \left(\frac {1}{N} \sum_ {i = 1} ^ {N} \Delta d _ {i}\right) ^ {2}} \tag {10} +$$ + +where $\Delta d_{i} = \log (d_{i}^{\mathrm{pred}}) - \log (d_{i}^{\mathrm{gt}})$ , and $N$ is the number of valid depth pixels. + +Camera Intrinsic Loss. The camera error is computed with the dense camera rays. For an image with height $H$ and width $W$ , the intrinsic loss is formulated as: + +$$ +\mathcal {L} _ {\mathrm {c a m}} = \sqrt {\frac {1}{H W} \sum_ {i = 1} ^ {H W} \Delta r _ {i} ^ {2} - 1 \cdot \left(\frac {1}{H W} \sum_ {i = 1} ^ {H W} \Delta r _ {i}\right) ^ {2}} \tag {11} +$$ + +where $\Delta r_{i} = r_{i}^{\mathrm{pred}} - r_{i}^{\mathrm{gt}}$ + +Detection Loss. The detection loss consists of three components: + +- Smooth L1 loss for box regression, covering the prediction of center, depth, and dimensions. +- Chamfer loss for rotation matrix prediction, ensuring accurate orientation estimation. + +![](images/eee539fd5445d9fba45f510b6fe2a76ffb3eb259a829c7561442ec74d15534e6.jpg) +Figure 9. An example on 3RScan. The left image shows the original 3RScan annotations, while the right image presents the detection results from Grounding DINO after feeding in all the 3RScan labels. Severe naming ambiguities (e.g., "trash can" vs. 
"rubbish bin") and missing annotations lead to a substantial decrease in the detector's performance. + +- Mean squared error (MSE) loss for 3D IoU score prediction, which optimizes the confidence estimates of detected objects. + +Combining these terms, the total detection loss is: + +$$ +\mathcal {L} _ {\mathrm {d e t}} = \mathcal {L} _ {\mathrm {b o x}} + \mathcal {L} _ {\mathrm {r o t}} + \mathcal {L} _ {\mathrm {i o u}}, \tag {12} +$$ + +# 8. Target-aware Metrics + +In our work, we evaluate both traditional metrics and the target-aware metrics proposed by OVMono3D [74]. Under the target-aware paradigm, rather than prompting the model with all possible classes from an entire dataset, we only prompt it with the classes present in the current image during inference. This is designed to address two key challenges encountered: + +- Missing annotations: Comprehensive 3D annotation is often impractical or prohibitively expensive, leading to incomplete ground-truth annotations. +- Naming ambiguity: Datasets may label the same objects with inconsistent category names or annotation policies, creating confusion when merging datasets. + +As illustrated in Figure 9, these issues are especially pronounced in the 3RScan [65] dataset. The left side shows the official 3RScan annotations, while the right side shows detections from Grounding DINO, which are largely misaligned with the dataset's labeling conventions. Consequently, traditional evaluation metrics may yield misleading or inconsistent results, whereas target-aware metrics help mitigate these mismatches by restricting the evaluated classes to those actually present in the scene. + +# 9. More Ablation Study + +# 9.1. Various Prompts Performance + +In this section, we evaluate different types of prompts, including box prompts, point prompts, and text prompts, both with and without intrinsic prompts. The results on Omni3D + +Table 4. Various Prompt Performance. + +
Prompt TypeBoxPointText
w/ Intrinsic Prompt34.3825.1922.31
w/o Intrinsic Prompt32.1624.021.02
+ +Table 5. Ablation on different backbones. The table reports $\mathrm{AP}_{3\mathrm{D}}$ scores. We verify the effectiveness of SAM and DINO along two dimensions: (1) whether or not we use the pretrained SAM parameters, and (2) whether adopt the pretrained DINO backbone or ConvNeXt for the depth module. + +
Backbonew/ SAMw/o SAM
DINO25.8019.12
ConvNeXt23.1118.27
+ +are presented in Table 4. Each prompt type demonstrates its effectiveness in guiding 3D detection. Besides, on the zero-shot datasets, we observe that omitting intrinsic prompts leads to a significant performance drop (even approaching zero), which further highlights the critical role of intrinsic prompts for reliable depth calibration in unseen scenarios. + +# 9.2. Ablation on Different Backbones + +In this section, we investigate our choice of backbone by comparing the use of SAM and DINO backbones. For DINO, we replace it with ConvNeXt and adopt the same pretraining method proposed by UniDepth. For SAM, we examine its effect by removing the SAM-pretrained weights and training from scratch. As shown in Table 5, SAM's pretrained parameters prove crucial for boosting performance. Meanwhile, compared to ConvNeXt, DINO offers richer geometric representations, resulting in stronger 3D detection performance. + +# 9.3. Ablation on DA3D Dataset + +We ablate the impact of the DA3D dataset in Tab. 6. The additional data in DA3D primarily improves generalization to novel cameras, as Omni3D contains only two distinctive intrinsics for outdoor scenes. + +Table 6. Ablation on training datasets. Unless specified, all models are trained on the Omni3D dataset. For the in-domain setting, prompts are provided by Cube R-CNN, while prompts for novel classes and novel datasets are generated by Grounding DINO. + +
MethodIn-domain +APommi3d +3DNovel ClassNovel Camera
APkit +3DAPsun +3DAPcity +3DAP3rs +3D
Cube R-CNN23.26--8.22 / --
OVMono3D22.984.71 / 4.714.07 / 16.785.88 / 10.980.37 / 8.48
DetAny3D24.3323.75 / 23.757.63 / 20.878.31 / 11.680.64 / 9.56
DetAny3DDA3D24.9225.73 / 25.737.63 / 21.0711.05 / 15.710.65 / 9.58
+ +# 9.4. Ablation on Inference Speed + +We compare the inference speed of DetAny3D with prior methods in Table 7. DetAny3D runs at 1.5 FPS on a single KITTI image, which is slower than Cube R-CNN (33.3 FPS) and OVMono3D (7.1 FPS). This is a trade-off for stronger generalization across novel categories and cameras, as DetAny3D is designed as a foundation model rather than for real-time deployment. + +Table 7. Inference speed comparison on KITTI. + +
MethodCube R-CNNOVMono3DDetAny3D
FPS ↑33.37.11.5
+ +# 9.5. Per-category Performance on Novel Classes + +As shown in Table 8, we provide a detailed comparison of per-category $\mathrm{AP}_{3\mathrm{D}}$ on novel classes from the KITTI, SUNRGBD, and ARKitScenes datasets between our DetAny3D and the baseline OVMono3D. DetAny3D shows consistent improvements across most categories. + +# 10. Limitations + +Text Prompt Process. Our method leverages open-vocabulary 2D detectors such as Grounding DINO to convert text prompts into 2D box prompts. While effective, this strategy may cause semantic loss, as textual nuances are not directly injected into the 3D detection pipeline. Moreover, 2D detectors are known to perform poorly under heavy occlusion or partial visibility, introducing a domain gap when transferring their outputs to 3D tasks. + +Inference Efficiency. Although DetAny3D achieves strong generalization across novel categories and camera settings, its inference speed (1.5 FPS) is significantly slower than existing lightweight 3D detectors. This limits its applicability in latency-sensitive scenarios such as real-time robotics or autonomous driving. + +Lack of Temporal Modeling. Our current design operates on single-frame inputs and does not utilize temporal information from video sequences. Incorporating motion cues and enforcing temporal consistency could potentially improve detection accuracy and enable better integration into downstream video-based tasks, such as video knowledge distillation and temporal grounding. + +# 11. Licenses and Privacy + +All data used in this work are obtained from publicly available datasets and are subject to their respective licenses. + +Table 8. Per-category target-aware $\mathrm{AP}_{3\mathrm{D}}$ comparison on novel classes between DetAny3D and OVMono3D. + +
CategoryOVMono3DDetAny3D
Board4.836.02
Printer16.2360.22
Painting2.805.11
Microwave30.3157.21
Tray10.116.70
Podium48.3773.65
Cart47.3133.46
Tram4.7127.90
Easy Categories20.5833.79
Monitor9.4415.95
Bag15.6117.69
Dresser29.0841.75
Keyboard9.139.52
Drawers43.0440.80
Computer7.4412.37
Kitchen Pan9.988.70
Potted Plant6.6626.34
Tissues12.4512.95
Rack10.219.04
Toys5.2416.14
Phone3.894.42
Soundsystem13.226.21
Fireplace13.1630.75
Hard Categories13.4718.05
All Categories16.0523.77
\ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07958/images/030cd2830111b994f5772d412ec9b32d8c117feabb525c49f2d8dc8a61fd4064.jpg b/data/2025/2504_07xxx/2504.07958/images/030cd2830111b994f5772d412ec9b32d8c117feabb525c49f2d8dc8a61fd4064.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79acc371e1b5880a7b7a6401cce472935dcf15ee --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/030cd2830111b994f5772d412ec9b32d8c117feabb525c49f2d8dc8a61fd4064.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abecea1fac31462f60878a74ef6e883a105eedce8d21181cdac7b63044ee90f8 +size 32026 diff --git a/data/2025/2504_07xxx/2504.07958/images/163cf4992709a31605b860aa94bd7ad353ee5991161c95956f4b52deca9d5aaa.jpg b/data/2025/2504_07xxx/2504.07958/images/163cf4992709a31605b860aa94bd7ad353ee5991161c95956f4b52deca9d5aaa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8627dd0843cc415aa2a9d746e74ab9cedd0d0ca --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/163cf4992709a31605b860aa94bd7ad353ee5991161c95956f4b52deca9d5aaa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fadacbf0de0bd9c026fc41e32238ac875d3441c11b777806b029750f23686af +size 74410 diff --git a/data/2025/2504_07xxx/2504.07958/images/1bfe711df5968b24f7bba9359640957445e0da768448431bc6b005b0d5564b35.jpg b/data/2025/2504_07xxx/2504.07958/images/1bfe711df5968b24f7bba9359640957445e0da768448431bc6b005b0d5564b35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c808256a93f9a4f7cbab4b548d676c0a7b36503 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/1bfe711df5968b24f7bba9359640957445e0da768448431bc6b005b0d5564b35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9d0da98b264bd5eb74c40ab849e0662c5d120fde12473eea8c96b7436d0b983 +size 4352 diff --git 
a/data/2025/2504_07xxx/2504.07958/images/1d30edbfc705f12c2dc603f39cff1ba091a66207250b07d910e9dea9bd132094.jpg b/data/2025/2504_07xxx/2504.07958/images/1d30edbfc705f12c2dc603f39cff1ba091a66207250b07d910e9dea9bd132094.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e15552c239a039cc6619e75c3ae1c5aea7321619 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/1d30edbfc705f12c2dc603f39cff1ba091a66207250b07d910e9dea9bd132094.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7899ea4a7641e3697ddc2f63fc570db6e51fe2f013137ce2ddcda40a1ee467a +size 10167 diff --git a/data/2025/2504_07xxx/2504.07958/images/2064a3cef29d98631876b710c36133004ed5bd90a00421ac59b9bd0bff893d21.jpg b/data/2025/2504_07xxx/2504.07958/images/2064a3cef29d98631876b710c36133004ed5bd90a00421ac59b9bd0bff893d21.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f9eb49fcb4c1022b8e7b972ddea3550b4dbd970e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/2064a3cef29d98631876b710c36133004ed5bd90a00421ac59b9bd0bff893d21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae6f9912355910b9797804c759cc6c9f15af557932209e27779d67e1f2162459 +size 15381 diff --git a/data/2025/2504_07xxx/2504.07958/images/21e1019616c40f2263d401db5f334715e72f37dfeb65f72de813273f4361ab13.jpg b/data/2025/2504_07xxx/2504.07958/images/21e1019616c40f2263d401db5f334715e72f37dfeb65f72de813273f4361ab13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0ab072729da96e4d352465604d7bc389998954f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/21e1019616c40f2263d401db5f334715e72f37dfeb65f72de813273f4361ab13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7736935e121fe9a4afd63eb0a93c8db2b85ae8058b9e1d5ee74dbeb4d28f9df8 +size 21027 diff --git a/data/2025/2504_07xxx/2504.07958/images/283dc9dacad50292f519d1562292c83f684402753d0b32c7492054f892880085.jpg 
b/data/2025/2504_07xxx/2504.07958/images/283dc9dacad50292f519d1562292c83f684402753d0b32c7492054f892880085.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2bf6e03e0e1c9e5f1e2423de2df9ce7706cbf78 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/283dc9dacad50292f519d1562292c83f684402753d0b32c7492054f892880085.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc5d125ffc687586f725addc915d0799827af45c81933a9d5f0c166c3d470b87 +size 7842 diff --git a/data/2025/2504_07xxx/2504.07958/images/299257f0337a99f363ce4d49708330f368be601ba092797380509dd2b32250d9.jpg b/data/2025/2504_07xxx/2504.07958/images/299257f0337a99f363ce4d49708330f368be601ba092797380509dd2b32250d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ddf83213054afc19872e01234fe94a0fb761b4fa --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/299257f0337a99f363ce4d49708330f368be601ba092797380509dd2b32250d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:322fd4d69fef4291fd67c832751154488c6cc02af98b43ec838b09b8ccd95f64 +size 196073 diff --git a/data/2025/2504_07xxx/2504.07958/images/3d4fc0898db5ec9fbe8d3c6ce360302596584579310f43e349d85be44983da8a.jpg b/data/2025/2504_07xxx/2504.07958/images/3d4fc0898db5ec9fbe8d3c6ce360302596584579310f43e349d85be44983da8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9100204bed4f105cfbc1b7d6e7ba848c5191c9b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/3d4fc0898db5ec9fbe8d3c6ce360302596584579310f43e349d85be44983da8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a104e6a2454d1c33cdc9c5e485e16324f096535def3db4ac4eeabc8208814fde +size 14028 diff --git a/data/2025/2504_07xxx/2504.07958/images/41475c4856505009cc46d3f9519fa847c0b260e6d0372b3c269e3559b76f5237.jpg b/data/2025/2504_07xxx/2504.07958/images/41475c4856505009cc46d3f9519fa847c0b260e6d0372b3c269e3559b76f5237.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..9ebfbec5bc7d139ee8209da07c79a791583bc9bd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/41475c4856505009cc46d3f9519fa847c0b260e6d0372b3c269e3559b76f5237.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1d7335225cda2d0226341ebad91af1337a0d4907607bcbff69c93416cc5b85a +size 5465 diff --git a/data/2025/2504_07xxx/2504.07958/images/446c14e45132adeb8fa7de90a1ee31c1dd609b2e2e31f19f391ca1a5296821f4.jpg b/data/2025/2504_07xxx/2504.07958/images/446c14e45132adeb8fa7de90a1ee31c1dd609b2e2e31f19f391ca1a5296821f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a1f5dbe49522262e16a2799818447f997b3efb5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/446c14e45132adeb8fa7de90a1ee31c1dd609b2e2e31f19f391ca1a5296821f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7202ab8cfd57ebf9ea2390e0fe81fa8fb1cebd1604a21ed64cc333a99167b700 +size 98414 diff --git a/data/2025/2504_07xxx/2504.07958/images/4568c2ee3c1179d81c0e12e63e2d6ab2b294a4bad4d770fcf97dfd8956d5356c.jpg b/data/2025/2504_07xxx/2504.07958/images/4568c2ee3c1179d81c0e12e63e2d6ab2b294a4bad4d770fcf97dfd8956d5356c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2490d77b5a48d4d942e12be5e10bffe371b5eb88 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/4568c2ee3c1179d81c0e12e63e2d6ab2b294a4bad4d770fcf97dfd8956d5356c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a182b48e69edc821d01b1b1235f0f9778714aa18b144b7c2c2ef66bbf1235132 +size 59704 diff --git a/data/2025/2504_07xxx/2504.07958/images/4a6af92d241965c53baf0518f271ed3b67264e1e57962923ddae3551abdc5c6e.jpg b/data/2025/2504_07xxx/2504.07958/images/4a6af92d241965c53baf0518f271ed3b67264e1e57962923ddae3551abdc5c6e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5d1882b6e766546eb0d3fc157594b03e60a1cae --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07958/images/4a6af92d241965c53baf0518f271ed3b67264e1e57962923ddae3551abdc5c6e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f356d672cf3c0714ccf3a39c50c1aeb68cf363585a43aff1fd144ddc90548b99 +size 5693 diff --git a/data/2025/2504_07xxx/2504.07958/images/534cf0be2a897df9ddedd1897bbf20c6bcfde9768fbd7dba638026507cfe94e2.jpg b/data/2025/2504_07xxx/2504.07958/images/534cf0be2a897df9ddedd1897bbf20c6bcfde9768fbd7dba638026507cfe94e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31dce94755c4f82c259cdc9bf1ca9284de21f5e7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/534cf0be2a897df9ddedd1897bbf20c6bcfde9768fbd7dba638026507cfe94e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f941cd2de0d6ad88203c68ddb7db0b8f294f1a3679b47bd2693650a1c79d48e +size 4740 diff --git a/data/2025/2504_07xxx/2504.07958/images/63c168899add200908ab32369bdd433eff8a0a4b37295ebfefda64321412b0b9.jpg b/data/2025/2504_07xxx/2504.07958/images/63c168899add200908ab32369bdd433eff8a0a4b37295ebfefda64321412b0b9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..578311a79362545d36ac556e9d3be8807337e703 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/63c168899add200908ab32369bdd433eff8a0a4b37295ebfefda64321412b0b9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a94a3089370d8172294ae24e7ce89cbe3244bf2e6bca965299fdc6c863c8bcb +size 79843 diff --git a/data/2025/2504_07xxx/2504.07958/images/6a784baaa6ef36e1634481d27759d2e17f84374a49fb217170a3a5e045d2c4ef.jpg b/data/2025/2504_07xxx/2504.07958/images/6a784baaa6ef36e1634481d27759d2e17f84374a49fb217170a3a5e045d2c4ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af58fd50740b0b7d73ca2dabdc01c9c8556182d9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/6a784baaa6ef36e1634481d27759d2e17f84374a49fb217170a3a5e045d2c4ef.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:72b3f0c7aa74bcfd22256601da45b71c46ce73f10d4d17596778498809fc2b1b +size 25804 diff --git a/data/2025/2504_07xxx/2504.07958/images/7e29ff8cb32bc9588a5c6dba8caf4d5feeecdd9ecd9443329a0af0fad606e977.jpg b/data/2025/2504_07xxx/2504.07958/images/7e29ff8cb32bc9588a5c6dba8caf4d5feeecdd9ecd9443329a0af0fad606e977.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96c4e4151ebdeec5954f5488fcdb578c1aee63bd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/7e29ff8cb32bc9588a5c6dba8caf4d5feeecdd9ecd9443329a0af0fad606e977.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:836f814562e5205dc4c427cdc712e713ab6914d719f96ac6d2d5d7d6717628b2 +size 27885 diff --git a/data/2025/2504_07xxx/2504.07958/images/84b06d4981e3cbde7098dfcd8a1289b9dea2d415eaa35bc9e8fb96624d9892b4.jpg b/data/2025/2504_07xxx/2504.07958/images/84b06d4981e3cbde7098dfcd8a1289b9dea2d415eaa35bc9e8fb96624d9892b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77203e741e75dc043fdeab9218314c169f91b693 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/84b06d4981e3cbde7098dfcd8a1289b9dea2d415eaa35bc9e8fb96624d9892b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd8384789d0dd9505776fe69afbfb83b415e9dca8bd5b5bc2597b475842184cd +size 32114 diff --git a/data/2025/2504_07xxx/2504.07958/images/8830df15b8218b952f02e4a337c426522f4241f7686a07e520d9210153f77c60.jpg b/data/2025/2504_07xxx/2504.07958/images/8830df15b8218b952f02e4a337c426522f4241f7686a07e520d9210153f77c60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42ffde3053f03b819e865660a909b0168dba9a93 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/8830df15b8218b952f02e4a337c426522f4241f7686a07e520d9210153f77c60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d13969856ed42e127adab47538173964dedef43f71dfc6482b295586bae24de5 +size 24910 diff --git 
a/data/2025/2504_07xxx/2504.07958/images/8e233f369a30d7442cf51e6289fad6a16356f81be236d69265f5e593d7e2a865.jpg b/data/2025/2504_07xxx/2504.07958/images/8e233f369a30d7442cf51e6289fad6a16356f81be236d69265f5e593d7e2a865.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f37e4c9deb2c4c1b0ba25ab0ef4a86a4bc057f7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/8e233f369a30d7442cf51e6289fad6a16356f81be236d69265f5e593d7e2a865.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e24820075472c60619f484eaa2bff32f238cd46a159868d171116e68dbf29aab +size 23606 diff --git a/data/2025/2504_07xxx/2504.07958/images/8e4047d9fac1f5250a5333356177ee2f9ce5fdb7d68739d4655955b268b10c54.jpg b/data/2025/2504_07xxx/2504.07958/images/8e4047d9fac1f5250a5333356177ee2f9ce5fdb7d68739d4655955b268b10c54.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b3bef0ea8f2c5c3bfd900177dc9eed1b47627479 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/8e4047d9fac1f5250a5333356177ee2f9ce5fdb7d68739d4655955b268b10c54.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d385d86ae79518a57933c88bbc775888339b919cd2b05a1d069b953a6158e59 +size 18114 diff --git a/data/2025/2504_07xxx/2504.07958/images/91f911c538bbec2983b29ce2fd15f511826695ab2d50c383e182329360bb8173.jpg b/data/2025/2504_07xxx/2504.07958/images/91f911c538bbec2983b29ce2fd15f511826695ab2d50c383e182329360bb8173.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6269859610a260eea285f1657848ec4c485c6f84 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/91f911c538bbec2983b29ce2fd15f511826695ab2d50c383e182329360bb8173.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c44120081483928ea165fd18624c537664d0cf9cd7e6f17858c48622d0f936e3 +size 47179 diff --git a/data/2025/2504_07xxx/2504.07958/images/97862b908765a136856a0ed4e484ee47780443596a04ca4bb07f91b048c034cf.jpg 
b/data/2025/2504_07xxx/2504.07958/images/97862b908765a136856a0ed4e484ee47780443596a04ca4bb07f91b048c034cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6709d977c0a5a29e99512a54aa6ea000e1ab657 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/97862b908765a136856a0ed4e484ee47780443596a04ca4bb07f91b048c034cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bec50b0a6e0fe50ff49a97b06ab6db0938fc60567dfdd8ba594db5e784ec99a6 +size 12043 diff --git a/data/2025/2504_07xxx/2504.07958/images/a0aba3091b7004c399aef614159e49fe21064ededfca3a3e497de229244a8756.jpg b/data/2025/2504_07xxx/2504.07958/images/a0aba3091b7004c399aef614159e49fe21064ededfca3a3e497de229244a8756.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0750dbf5fed3dc4b173fed6e41aa997dad6d4eeb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/a0aba3091b7004c399aef614159e49fe21064ededfca3a3e497de229244a8756.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a1c1bc1718e55e68efccf564e0ed0a3359f4b55ae689fb98985f87ede7d7198 +size 5721 diff --git a/data/2025/2504_07xxx/2504.07958/images/b252238fbbe5d98f1485db49a7d87d21f121366de113853d5529d66fa624f8cf.jpg b/data/2025/2504_07xxx/2504.07958/images/b252238fbbe5d98f1485db49a7d87d21f121366de113853d5529d66fa624f8cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..86fa3877a3145b5fb1993b58372f27ef88fd11de --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/b252238fbbe5d98f1485db49a7d87d21f121366de113853d5529d66fa624f8cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4ffb04fa74939da4e7e26d91e9a93fe9f9210a5fec16e23536ce36ce43f2025 +size 4462 diff --git a/data/2025/2504_07xxx/2504.07958/images/bba475ca3757c24fd87f39ff5c56d0ed96a190907ce290761b5951994746dba0.jpg b/data/2025/2504_07xxx/2504.07958/images/bba475ca3757c24fd87f39ff5c56d0ed96a190907ce290761b5951994746dba0.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..cb8439a4a9dc4c51e5ebe3b387f82c8cd04a495e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/bba475ca3757c24fd87f39ff5c56d0ed96a190907ce290761b5951994746dba0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d250c1b38d311bfa3ca8da611b7701d3829a5f646bd6db34e56d064cd8a2576 +size 2689 diff --git a/data/2025/2504_07xxx/2504.07958/images/be40af5af1549e01c3f5d286d5e1cd6903893f23a28ff81ba2b5b61acae676a5.jpg b/data/2025/2504_07xxx/2504.07958/images/be40af5af1549e01c3f5d286d5e1cd6903893f23a28ff81ba2b5b61acae676a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb8574b888a09b5c54b46715caa1b715c831b4ff --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/be40af5af1549e01c3f5d286d5e1cd6903893f23a28ff81ba2b5b61acae676a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92d2d12284a603850bd304249e1e9b7965fd657098b5c8e99d8055ace85b954d +size 3865 diff --git a/data/2025/2504_07xxx/2504.07958/images/d47015f2b7753c66309425332a31d6c53d79bb5088b122a1302179d821678c08.jpg b/data/2025/2504_07xxx/2504.07958/images/d47015f2b7753c66309425332a31d6c53d79bb5088b122a1302179d821678c08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ada343e8de426d8fa052f00796c88a8ad13ffaf4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/d47015f2b7753c66309425332a31d6c53d79bb5088b122a1302179d821678c08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1f7b0d43b4938194efafb6dfaf54360643c0a3b2b65350a90defde158a3df77 +size 79494 diff --git a/data/2025/2504_07xxx/2504.07958/images/d590554d626109a811e1ec333712a38df32a68a522bdcd979922ea03bb43e31d.jpg b/data/2025/2504_07xxx/2504.07958/images/d590554d626109a811e1ec333712a38df32a68a522bdcd979922ea03bb43e31d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9caacf6889d449702fc0d88958b86fcadedbb93 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07958/images/d590554d626109a811e1ec333712a38df32a68a522bdcd979922ea03bb43e31d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50483f7897d852bd6035531b42df369599c5f529f4256033dc2424e8fd113a2c +size 6996 diff --git a/data/2025/2504_07xxx/2504.07958/images/ed02c5299eea3f9d7fbc6b3c62c743a443009cb116b41fb478d5e440aac3c5ac.jpg b/data/2025/2504_07xxx/2504.07958/images/ed02c5299eea3f9d7fbc6b3c62c743a443009cb116b41fb478d5e440aac3c5ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..113c0879baa72c0426be2647cac14dc8b6cf75d7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/ed02c5299eea3f9d7fbc6b3c62c743a443009cb116b41fb478d5e440aac3c5ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca723910da4b3885e2992f5316cbf7fdcbc60573394bbebc8d8bb0807d845fdf +size 84204 diff --git a/data/2025/2504_07xxx/2504.07958/images/eee539fd5445d9fba45f510b6fe2a76ffb3eb259a829c7561442ec74d15534e6.jpg b/data/2025/2504_07xxx/2504.07958/images/eee539fd5445d9fba45f510b6fe2a76ffb3eb259a829c7561442ec74d15534e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52969c684edc1595d6e8762119390259cb4c0d62 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/eee539fd5445d9fba45f510b6fe2a76ffb3eb259a829c7561442ec74d15534e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4e3883ed1e07bd420d8750f0104d50ee65075993826d11e5ce42acb673680da +size 22986 diff --git a/data/2025/2504_07xxx/2504.07958/images/f397f4c5587a2cae38a4aade4c7fdc629aac68b340c7078a1bc03e6bc38aee91.jpg b/data/2025/2504_07xxx/2504.07958/images/f397f4c5587a2cae38a4aade4c7fdc629aac68b340c7078a1bc03e6bc38aee91.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd916e51c278f083ec3a87c0a9be368e8d5a6dc5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/f397f4c5587a2cae38a4aade4c7fdc629aac68b340c7078a1bc03e6bc38aee91.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f34749d92b2b4b2dbaa08e9f03d44cd1302d9876817659dabe1d1e2d23c370f9 +size 4257 diff --git a/data/2025/2504_07xxx/2504.07958/images/f51fa1c38c8264dda2a24ac091ec28f700980b6116fcae49b6d6257c519fa399.jpg b/data/2025/2504_07xxx/2504.07958/images/f51fa1c38c8264dda2a24ac091ec28f700980b6116fcae49b6d6257c519fa399.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33c55dec40937739075a1bd4c05cf5ea3e937fe4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/f51fa1c38c8264dda2a24ac091ec28f700980b6116fcae49b6d6257c519fa399.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a00588cdd11bc955013ad9868c5f43eecace35ba3405aaaf15599fa65d220ae9 +size 13897 diff --git a/data/2025/2504_07xxx/2504.07958/images/f86f1c5d4087da4c6e75041cb573925b9e5c37ff133e7b388423242a420533e5.jpg b/data/2025/2504_07xxx/2504.07958/images/f86f1c5d4087da4c6e75041cb573925b9e5c37ff133e7b388423242a420533e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c26743900bbe46103a6c60a2326a8cd239594e4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/f86f1c5d4087da4c6e75041cb573925b9e5c37ff133e7b388423242a420533e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:627fd0f2e3c7649c30bc93405ad3c844e36b8657bcdfd39be3ef7902056506bf +size 3499 diff --git a/data/2025/2504_07xxx/2504.07958/images/fd094f8bfb8df55679742a9eddbcfc7948e1190818e49b8c16a94435d9a33aab.jpg b/data/2025/2504_07xxx/2504.07958/images/fd094f8bfb8df55679742a9eddbcfc7948e1190818e49b8c16a94435d9a33aab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d68e4dc5426b265a4eb1b21fa2852ca88b2826c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/fd094f8bfb8df55679742a9eddbcfc7948e1190818e49b8c16a94435d9a33aab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aa3a3efd0e2ca6503c2f1a51fb281b0cbf8e4c38d73a9723251f0475d21b230 +size 5539 diff --git 
a/data/2025/2504_07xxx/2504.07958/images/fdd5205f1d548383d657c326d25f7fd8a2d94096644c9594ddfb5be630e84aeb.jpg b/data/2025/2504_07xxx/2504.07958/images/fdd5205f1d548383d657c326d25f7fd8a2d94096644c9594ddfb5be630e84aeb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b6c1b348e9aeaa6352d4052f40e3bfd90d56ac8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/images/fdd5205f1d548383d657c326d25f7fd8a2d94096644c9594ddfb5be630e84aeb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41ed6df3934e17d07fe6237f4b5d7a7b95259957e9b73225e15b7014960637a6 +size 10527 diff --git a/data/2025/2504_07xxx/2504.07958/layout.json b/data/2025/2504_07xxx/2504.07958/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d1dae710d76ebd22d5f5bafa4432a0fdb497d786 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07958/layout.json @@ -0,0 +1,13470 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 207, + 103, + 403, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 103, + 403, + 121 + ], + "spans": [ + { + "bbox": [ + 207, + 103, + 403, + 121 + ], + "type": "text", + "content": "Detect Anything 3D in the Wild" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "spans": [ + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "content": "Hanxue Zhang" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "content": ", Haoran Jiang" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "inline_equation", + "content": "^{1,3*}" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "content": ", Qingsong Yao" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "inline_equation", + 
"content": "^{4*}" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "content": ", Yanan Sun" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "content": ", Renrui Zhang" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "content": ", Hao Zhao" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "content": ", Hongyang Li" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "content": ", Hongzi Zhu" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "text", + "content": ", Zetong Yang" + }, + { + "bbox": [ + 107, + 142, + 504, + 173 + ], + "type": "inline_equation", + "content": "^{1,7}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 176, + 550, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 176, + 550, + 205 + ], + "spans": [ + { + "bbox": [ + 62, + 176, + 550, + 205 + ], + "type": "text", + "content": "1 OpenDriveLab at Shanghai AI Laboratory 2 Shanghai Jiao Tong University 3 Fudan University 4 Stanford University 5 CUHK MMLab 6 Tsinghua University 7 GAC R&D Center" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 184, + 209, + 427, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 209, + 427, + 222 + ], + "spans": [ + { + "bbox": [ + 184, + 209, + 427, + 222 + ], + "type": "text", + "content": "https://github.com/OpenDriveLab/DetAny3D" + } + ] + } + ], + "index": 
4 + }, + { + "type": "image", + "bbox": [ + 57, + 242, + 553, + 506 + ], + "blocks": [ + { + "bbox": [ + 57, + 242, + 553, + 506 + ], + "lines": [ + { + "bbox": [ + 57, + 242, + 553, + 506 + ], + "spans": [ + { + "bbox": [ + 57, + 242, + 553, + 506 + ], + "type": "image", + "image_path": "299257f0337a99f363ce4d49708330f368be601ba092797380509dd2b32250d9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 513, + 555, + 559 + ], + "lines": [ + { + "bbox": [ + 54, + 513, + 555, + 559 + ], + "spans": [ + { + "bbox": [ + 54, + 513, + 555, + 559 + ], + "type": "text", + "content": "Figure 1. Introducing DetAny3D, a promptable 3D detection foundation model capable of detecting any 3D object with arbitrary monocular images in diverse scenes. Our framework enables multi-prompt interaction (e.g., box, point, and text) to deliver open-world 3D detection results (" + }, + { + "bbox": [ + 54, + 513, + 555, + 559 + ], + "type": "inline_equation", + "content": "w \\times h \\times l" + }, + { + "bbox": [ + 54, + 513, + 555, + 559 + ], + "type": "text", + "content": " in centimeter) for novel objects across various domains. It achieves significant zero-shot generalization, outperforming SOTA by up to 21.02 and 5.68 AP3D on novel categories and novel datasets with new camera configurations." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 152, + 579, + 200, + 592 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 579, + 200, + 592 + ], + "spans": [ + { + "bbox": [ + 152, + 579, + 200, + 592 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 607, + 295, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 607, + 295, + 691 + ], + "spans": [ + { + "bbox": [ + 54, + 607, + 295, + 691 + ], + "type": "text", + "content": "Despite the success of deep learning in close-set 3D object detection, existing approaches struggle with zero-shot generalization to novel objects and camera configurations. We introduce DetAny3D, a promptable 3D detection foundation model capable of detecting any novel object under arbitrary camera configurations using only monocular inputs. Training a foundation model for 3D detection is fun" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 569, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 569, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 569, + 555, + 713 + ], + "type": "text", + "content": "damentally constrained by the limited availability of annotated 3D data, which motivates DetAny3D to leverage the rich prior knowledge embedded in extensively pre-trained 2D foundation models to compensate for this scarcity. To effectively transfer 2D knowledge to 3D, DetAny3D incorporates two core modules: the 2D Aggregator, which aligns features from different 2D foundation models, and the 3D Interpreter with Zero-Embedding Mapping, which stabilizes early training in 2D-to-3D knowledge transfer. 
Experimental results validate the strong generalization of our DetAny3D, which not only achieves state-of-the-art performance on unseen categories and novel camera configura" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 224, + 35, + 565 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 224, + 35, + 565 + ], + "spans": [ + { + "bbox": [ + 14, + 224, + 35, + 565 + ], + "type": "text", + "content": "arXiv:2504.07958v3 [cs.CV] 2 Dec 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 703, + 138, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 703, + 138, + 712 + ], + "spans": [ + { + "bbox": [ + 70, + 703, + 138, + 712 + ], + "type": "text", + "content": "*Equal contribution." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "text", + "content": "tions, but also surpasses most competitors on in-domain data. DetAny3D sheds light on the potential of the 3D foundation model for diverse applications in real-world scenarios, e.g., rare object detection in autonomous driving, and demonstrates promise for further exploration of 3D-centric tasks in open-world settings. More visualization results can be found at our code repository." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 178, + 136, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 178, + 136, + 192 + ], + "spans": [ + { + "bbox": [ + 56, + 178, + 136, + 192 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 198, + 295, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 198, + 295, + 414 + ], + "spans": [ + { + "bbox": [ + 55, + 198, + 295, + 414 + ], + "type": "text", + "content": "3D object detection is a fundamental technology for autonomous systems [12, 14, 15, 36, 48, 49], robotics [6, 67, 84], and augmented reality [43, 52]. 3D perception not only enables machines to perceive and interact with the physical world, but also serves as a foundational input for more advanced tasks, such as behavior decision [3, 11, 20, 31], world modeling [22, 23, 38] and 3D scene reconstruction [50, 73, 75]. For practical deployment, a generalizable 3D detector ideally should detect arbitrary objects from easily accessible inputs, such as monocular images, without relying on specific sensor parameters. Such a model would be highly adaptable and reliable for various downstream tasks in diverse and unpredictable environments [15, 36, 43, 84]. Also, accurate detection results provided by such a detector (e.g., generating 3D bounding boxes for even images from the internet) make it a versatile tool, paving the way for scalable 3D systems that leverage Internet-scale data and advance toward open-world scenarios [22, 23, 38, 50, 73]." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 415, + 295, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 415, + 295, + 545 + ], + "spans": [ + { + "bbox": [ + 54, + 415, + 295, + 545 + ], + "type": "text", + "content": "Previous research, exemplified by Omni3D [8], has attempted to improve the generalization of the 3D detection system through multi-dataset training [8, 35, 40, 68]. However, despite utilizing large datasets to train a unified detector [8, 40], these approaches provide limited generalization to novel camera configurations and cannot detect unseen object categories beyond predefined label spaces. Therefore, developing a 3D detection foundation model with strong zero-shot generalizability, which is capable of detecting any unseen object under arbitrary camera configurations, remains a crucial and unsolved problem." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 545, + 295, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 545, + 295, + 688 + ], + "spans": [ + { + "bbox": [ + 55, + 545, + 295, + 688 + ], + "type": "text", + "content": "While recent advances in 2D foundation models [33, 44, 51, 56] demonstrate remarkable zero-shot capabilities. Segment Anything Model (SAM) [33] features a promptable inference mechanism, supporting user-friendly prompts like points and boxes to segment user-specified objects. Their impressive generalization ability stems from training on billions of annotated images. However, in 3D object detection, the available labeled data is limited to only millions of samples—typically 3-4 orders of magnitude smaller than in 2D images. Such severe data scarcity [74, 86] poses a fundamental challenge, making it nearly infeasible to train a 3D foundation model from scratch." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": "In this work, we present DetAny3D, a promptable 3D detection foundation model designed for generalizable 3D" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 553, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 179 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 179 + ], + "type": "text", + "content": "object detection using only monocular images (see Figure 1). Given the inherent scarcity of 3D annotated data, we achieve strong generalization from two critical perspectives: model architecture and data utilization. The central insight of our approach is to leverage the extensive prior knowledge encoded within two broadly pre-trained 2D foundation models—SAM [33] and DINO [10, 51)—thus unlocking effective zero-shot 3D detection capabilities with minimal available 3D data." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 182, + 553, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 182, + 553, + 324 + ], + "spans": [ + { + "bbox": [ + 313, + 182, + 553, + 324 + ], + "type": "text", + "content": "Specifically, we adopt SAM as our promptable backbone, capitalizing on its versatile and robust object understanding capability derived from large-scale 2D data. Concurrently, we utilize DINO [51] depth-pretrained by UniDepth [54], to offer redundant 3D geometric priors [7, 76], which plays a pivotal role for accurate 3D detection in a monocular setting. To integrate the complementary features from SAM and DINO more effectively, we propose the 2D Aggregator, an attention-based mechanism that aligns these features and dynamically optimizes their contributions via learnable gating. 
2D Aggregator fully exploits the strengths of each foundation model." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 327, + 553, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 327, + 553, + 447 + ], + "spans": [ + { + "bbox": [ + 313, + 327, + 553, + 447 + ], + "type": "text", + "content": "To further address the challenge of effectively transferring knowledge from 2D to 3D, we introduce the 3D Interpreter. Central to the 3D Interpreter is the Zero-Embedding Mapping (ZEM) mechanism, which ensures stable 2D-to-3D mapping by reducing early-stage interference and preserving pretrained 2D priors. By stabilizing the training process across diverse datasets with varying camera parameters, scene complexities, and depth distributions, the ZEM mechanism enables progressive zero-shot 3D grounding capabilities, significantly enhancing model generalization." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "content": "To leverage as much 3D-related data as possible, we aggregate a diverse range of datasets, including 16 datasets spanning depth with intrinsic data and 3D detection data, referred as DA3D. 
Experimental results, using prompts aligned with the baselines, demonstrate three key advantages: (1) Generalization to novel classes: achieves " + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "21.0\\%" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "4.3\\%" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "11.3\\%" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "content": " higher zero-shot " + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "content": " than baselines on novel categories on KITTI, SUNRGBD, and ARKitScenes. (2) Generalization to novel cameras: improves cross-dataset performance by " + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "4.7\\%" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "5.7\\%" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "1.1\\%" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "content": " compared to baseline methods on zero-shot datasets Cityscapes3D, Waymo and 3RScan. 
(3) Performance on in-domain data: surpasses baseline by " + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "1.6\\%" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 449, + 553, + 616 + ], + "type": "text", + "content": " on Omni3D. Core contributions are summarized in following:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 617, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 313, + 617, + 553, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 553, + 653 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 553, + 653 + ], + "type": "text", + "content": "- We develop DetAny3D, a promptable 3D detection foundation model capable of detecting any 3D object in real-world scenarios with arbitrary monocular inputs." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 654, + 553, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 553, + 701 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 553, + 701 + ], + "type": "text", + "content": "- DetAny3D introduces 2D Aggregator to effectively fuse the features from two 2D foundation models SAM and depth-pretrained DINO, which provide pivot shape and 3D geometric priors for various objects, respectively." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 701, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 701, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 701, + 553, + 713 + ], + "type": "text", + "content": "- In 2D-to-3D knowledge transfer, DetAny3D involves" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 63, + 72, + 294, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 72, + 294, + 120 + ], + "spans": [ + { + "bbox": [ + 63, + 72, + 294, + 120 + ], + "type": "text", + "content": "Zero-Embedding Mapping in 3D Interpreter to achieve stable 2D-to-3D mapping, enabling the model to train robustly across datasets with diverse camera parameters, varying scenes, and different depth distributions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 121, + 294, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 121, + 294, + 180 + ], + "spans": [ + { + "bbox": [ + 56, + 121, + 294, + 180 + ], + "type": "text", + "content": "- The experimental results demonstrate significant advantages of DetAny3D, particularly in accurately detecting unseen 3D objects with arbitrary camera parameters in the zero-shot setting, showcasing its potential across a wide range of real-world applications." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 193, + 144, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 193, + 144, + 205 + ], + "spans": [ + { + "bbox": [ + 55, + 193, + 144, + 205 + ], + "type": "text", + "content": "2. Related works" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 213, + 173, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 213, + 173, + 225 + ], + "spans": [ + { + "bbox": [ + 55, + 213, + 173, + 225 + ], + "type": "text", + "content": "2.1. 3D Object Detection" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 231, + 296, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 231, + 296, + 567 + ], + "spans": [ + { + "bbox": [ + 55, + 231, + 296, + 567 + ], + "type": "text", + "content": "Existing 3D object detection systems have predominantly focused on single-dataset optimization, achieving strong performance on benchmark datasets like KITTI [24] and nuScenes [9] through task-specific architectures [14, 18, 39, 41, 42, 45, 66, 80]. While effective in constrained scenarios, these approaches suffer from significant domain gaps when deployed in new contexts, primarily due to their reliance on limited sensor-specific data and closed-set assumptions. Recent works, exemplified by Omni3D [8], have demonstrated the potential of multi-dataset training. Models like Cube R-CNN [8] and UniMODE [40] train a universal monocular 3D detector across multiple datasets, achieving some level of robustness to camera parameters, but are still restricted to predefined classes. V-MIND [32] further addresses the data scarcity challenge by generating pseudo 3D training data from large-scale 2D annotations. Towards more general detection, OV-Uni3DETR [69] pioneers openset detection that is able to detect with multimodal inputs, but it is trained separately for indoor and outdoor domains, thereby limiting its overall generalization. 
More recently, OVMono3D [74] leverages Grounding DINO's [44] 2D results with a 3D head on unified datasets. However, it does not fully exploit the priors contained in 2D foundation models, leading to performance constraints tied to the limited 3D data. In contrast, our approach fully capitalizes on the knowledge distilled in 2D foundation models while leveraging abundant 3D-related data, thereby enabling the detection of any 3D object from arbitrary monocular inputs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 575, + 201, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 575, + 201, + 588 + ], + "spans": [ + { + "bbox": [ + 55, + 575, + 201, + 588 + ], + "type": "text", + "content": "2.2. Vision Foundation Models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 594, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 295, + 713 + ], + "type": "text", + "content": "Foundation models have demonstrated significant potential across various domains. For example, language foundation models such as GPT-4 [1] and DeepSeek [5, 26], trained on massive internet-scale corpora, have achieved impressive capabilities in natural language processing across diverse fields [1, 5, 60, 63, 81, 82]. Similarly, foundation models in the vision domain have made remarkable strides [29, 33, 37, 44, 51, 56, 79]. DINOv2 [51], trained on a vast range of curated data from diverse sources, is capable of producing general-purpose visual features that work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 555, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 336 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 336 + ], + "type": "text", + "content": "seamlessly across different image distributions and tasks. 
SAM [33] has taken a step further in the vision domain by introducing promptability, enabling models to generalize to novel visual concepts through large-scale data training and continuous model refinement. In recent years, the development of foundation models in the 3D domain has started to take initial steps [13, 28, 55, 78, 83, 85]. Most existing 3D foundation models are often combined with vision-language models (VLMs) [13, 27, 55, 85], relying on point clouds as input to help the language models understand 3D [13, 85]. While these methods are valuable for scene understanding and semantic tasks, they do not directly provide precise 3D detection results. Moreover, point cloud inputs significantly restrict the use cases [72], as they are not always accessible in many practical scenarios. In contrast to these approaches, we aim to develop a foundation model specifically dedicated to 3D detection tasks with the most general inputs, monocular images. By leveraging the powerful priors from 2D vision foundation models, our approach enables the detection of any 3D object with arbitrary camera configurations, presenting a broad range of practical applications." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 349, + 492, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 349, + 492, + 363 + ], + "spans": [ + { + "bbox": [ + 313, + 349, + 492, + 363 + ], + "type": "text", + "content": "3. Detect Anything 3D in the Wild" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 370, + 382, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 370, + 382, + 382 + ], + "spans": [ + { + "bbox": [ + 313, + 370, + 382, + 382 + ], + "type": "text", + "content": "3.1. 
Overview" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 388, + 555, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 388, + 555, + 495 + ], + "spans": [ + { + "bbox": [ + 313, + 388, + 555, + 495 + ], + "type": "text", + "content": "As illustrated in Figure 2(a), DetAny3D takes a monocular RGB image and prompts (e.g., boxes, points, text, intrinsic) as input. The box, point, and text prompts are used to specify objects, while the intrinsic prompts are optional. When not provided, the model predicts the intrinsic parameters and the corresponding 3D detection results. If intrinsic are available, the model can leverage them as geometric constraints to mitigate the ill-posed nature of monocular depth estimation and calibrate its detection results." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 498, + 556, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 556, + 640 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 556, + 640 + ], + "type": "text", + "content": "Specifically, the monocular image is embedded in parallel by two foundational models: SAM [33] for low-level pixel information, underpins the entire promptable architecture. And depth-pretrained DINO [51, 54], which provide rich high-level geometric knowledge, excels in depth-related tasks. These complementary 2D features are then fused through our proposed 2D Aggregator (see Figure 2(b)), which hierarchically aligns low-level and high-level information using cross-attention layers. The fused features are subsequently passed to the Depth/Camera Module, which extracts the camera and camera-aware depth embedding, collectively referred to as geometric embedding." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": "The geometric embedding and the 3D bounding box tokens with encoded prompt tokens are then fed into the 3D Interpreter (see Figure 2(c)), which employs a structure similar to the SAM decoder along with a specialized Zero-Embedding Mapping (ZEM) mechanism. 3D Interpreter injects 3D geometric features while ensuring stable 2D-to" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 72, + 545, + 194 + ], + "blocks": [ + { + "bbox": [ + 59, + 72, + 545, + 194 + ], + "lines": [ + { + "bbox": [ + 59, + 72, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 59, + 72, + 545, + 194 + ], + "type": "image", + "image_path": "91f911c538bbec2983b29ce2fd15f511826695ab2d50c383e182329360bb8173.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 198, + 545, + 299 + ], + "blocks": [ + { + "bbox": [ + 59, + 198, + 545, + 299 + ], + "lines": [ + { + "bbox": [ + 59, + 198, + 545, + 299 + ], + "spans": [ + { + "bbox": [ + 59, + 198, + 545, + 299 + ], + "type": "image", + "image_path": "4568c2ee3c1179d81c0e12e63e2d6ab2b294a4bad4d770fcf97dfd8956d5356c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 304, + 555, + 370 + ], + "lines": [ + { + "bbox": [ + 54, + 304, + 555, + 370 
+ ], + "spans": [ + { + "bbox": [ + 54, + 304, + 555, + 370 + ], + "type": "text", + "content": "Figure 2. Overview of DetAny3D. It supports arbitrary monocular images as input and performs 3D object detection driven by prompts—box, point, and text to specify target objects and optional camera calibration to calibrate geometric projections. DetAny3D comprises two key modules: (b) 2D Aggregator, which employs a hierarchical cross-attention mechanism to dynamically fuse knowledge from SAM and DINO, with a learnable gate controlling each component's contribution to the geometric embedding; (c) 3D Interpreter, which introduces a Zero-Embedding Mapping (ZEM) strategy based on zero-initialized layers to gradually inject geometric priors, thereby enables zero-shot 3D grounding and avoids catastrophic forgetting during knowledge transfer." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 383, + 296, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 383, + 296, + 443 + ], + "spans": [ + { + "bbox": [ + 55, + 383, + 296, + 443 + ], + "type": "text", + "content": "3D knowledge transfer, enabling progressive 3D grounding across diverse data domains. Finally, the model predicts 3D boxes based on the hidden states of the 3D box tokens. Our DetAny3D is trained on selected seen classes and can detect any unseen classes in a zero-shot manner." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 453, + 148, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 453, + 148, + 466 + ], + "spans": [ + { + "bbox": [ + 55, + 453, + 148, + 466 + ], + "type": "text", + "content": "3.2.2D Aggregator" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 472, + 295, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 472, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 472, + 295, + 555 + ], + "type": "text", + "content": "To effectively fuse multiple foundation models, we propose 2D Aggregator to aggregate features from SAM and DINO, mitigating potential conflicts between their heterogeneous representations. As illustrated in Figure 2(b), the 2D Aggregator fuses features from SAM and DINO in a hierarchical manner, progressively integrating spatial and geometric information across four cascaded alignment units." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "spans": [ + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "type": "text", + "content": "Feature Extraction. Given an input image, the SAM encoder extracts high-resolution spatial features " + }, + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_s\\in" + }, + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{H_s\\times W_s\\times C}" + }, + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "type": "text", + "content": ", capturing fine-grained details and boundaries. 
Simultaneously, the DINO encoder outputs geometry-aware embeddings " + }, + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_d\\in \\mathbb{R}^{H_d\\times W_d\\times C}" + }, + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "type": "text", + "content": ", which is depth-pretrained by UniDepth [54] and provides robust priors for depth and intrinsics. Following the design of ViT Adapter [16], we also employ a convolutional structure to produce preliminary image features, denoted as " + }, + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_q^0" + }, + { + "bbox": [ + 55, + 556, + 295, + 676 + ], + "type": "text", + "content": ", serving as the initial query for subsequent attention-based fusion." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": "Hierarchical Fusion. Each of the four alignment units fuses SAM and DINO features via cross-attention. 
In the " + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": "-th unit, we first apply learnable gating weights " + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\alpha_{i}" + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": " (initial" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 382, + 553, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 382, + 553, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 382, + 553, + 407 + ], + "type": "text", + "content": "ized to 0.5) to combine the " + }, + { + "bbox": [ + 313, + 382, + 553, + 407 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 313, + 382, + 553, + 407 + ], + "type": "text", + "content": "-th block of SAM features " + }, + { + "bbox": [ + 313, + 382, + 553, + 407 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_s^i" + }, + { + "bbox": [ + 313, + 382, + 553, + 407 + ], + "type": "text", + "content": " and DINO features " + }, + { + "bbox": [ + 313, + 382, + 553, + 407 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_d^i" + }, + { + "bbox": [ + 313, + 382, + 553, + 407 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 365, + 415, + 553, + 429 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 415, + 553, + 429 + ], + "spans": [ + { + "bbox": [ + 365, + 415, + 553, + 429 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {\\text {f u s e d}} ^ {i} = \\alpha_ {i} \\cdot \\mathbf {F} _ {s} ^ {i} + (1 - \\alpha_ {i}) \\cdot \\mathbf {F} _ {d} ^ {i}. 
\\tag {1}", + "image_path": "f397f4c5587a2cae38a4aade4c7fdc629aac68b340c7078a1bc03e6bc38aee91.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 435, + 553, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 435, + 553, + 460 + ], + "spans": [ + { + "bbox": [ + 313, + 435, + 553, + 460 + ], + "type": "text", + "content": "We use " + }, + { + "bbox": [ + 313, + 435, + 553, + 460 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{fused}}^{i}" + }, + { + "bbox": [ + 313, + 435, + 553, + 460 + ], + "type": "text", + "content": " as key and value, while the query feature " + }, + { + "bbox": [ + 313, + 435, + 553, + 460 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_q^{i - 1}" + }, + { + "bbox": [ + 313, + 435, + 553, + 460 + ], + "type": "text", + "content": " acts as the query in the cross-attention mechanism:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 349, + 468, + 553, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 349, + 468, + 553, + 483 + ], + "spans": [ + { + "bbox": [ + 349, + 468, + 553, + 483 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {q} ^ {i} = \\operatorname {C r o s s A t t n} \\left(\\mathbf {F} _ {q} ^ {i - 1}, \\mathbf {F} _ {\\text {f u s e d}} ^ {i}, \\mathbf {F} _ {\\text {f u s e d}} ^ {i}\\right), \\tag {2}", + "image_path": "4a6af92d241965c53baf0518f271ed3b67264e1e57962923ddae3551abdc5c6e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 373, + 491, + 553, + 507 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 491, + 553, + 507 + ], + "spans": [ + { + "bbox": [ + 373, + 491, + 553, + 507 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {F}} _ {\\text {f u s e d}} ^ {i} = \\operatorname {N o r m} \\left(\\mathbf {F} _ {\\text {f u s e d}} ^ {i} + \\mathbf {F} _ {q} ^ {i}\\right). 
\\tag {3}", + "image_path": "1bfe711df5968b24f7bba9359640957445e0da768448431bc6b005b0d5564b35.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 510, + 555, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 555, + 557 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 555, + 557 + ], + "type": "text", + "content": "This design enables the model to dynamically emphasize SAM's spatial details or DINO's semantic and geometric cues at different hierarchy levels while minimizing interference between the two representations." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "text", + "content": "Geometric Embeddings. The fused features " + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{F}}_{\\mathrm{fused}}^i" + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "inline_equation", + "content": "i \\in [1,2,3,4]" + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "text", + "content": ", are subsequently processed by the depth and camera modules, following the UniDepth [54] architecture. 
Specifically, these modules predict the camera embedding " + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "text", + "content": " and camera-aware depth embedding " + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{D}|\\mathbf{C}" + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "text", + "content": ", referred as the geometric embedding " + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{G} = \\{\\mathbf{D}|\\mathbf{C}, \\mathbf{C}\\}" + }, + { + "bbox": [ + 313, + 557, + 555, + 665 + ], + "type": "text", + "content": ". These modules provide aligned depth and camera parameters under the monocular depth ill-posed problem. Further details can be found in the Supplementary material Section 7.1." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 666, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 666, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 666, + 554, + 713 + ], + "type": "text", + "content": "Overall, by progressively aligning multi-scale features and adaptively integrating their contributions, 2D Aggregator effectively leverages the strengths of both foundation models while minimizing potential conflicts." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 147, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 147, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 147, + 85 + ], + "type": "text", + "content": "3.3. 3D Interpreter" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 89, + 296, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 89, + 296, + 221 + ], + "spans": [ + { + "bbox": [ + 55, + 89, + 296, + 221 + ], + "type": "text", + "content": "The diverse 3D object supervisions across various scenarios, depths, and camera intrinsics introduce challenges to model training. Our 3D Interpreter aims to progressively integrate geometric information while ensuring stable 2D-to-3D knowledge transfer. We introduce Zero-Embedding Mapping (ZEM) mechanism, which incrementally infuses 3D geometry into the decoder via zero-initialized layers—without disrupting the original 2D features. As Figure 2(c) shows, the 3D Interpreter comprises three main components: the Two-Way Transformer, the Geometric Transformer, and the 3D bounding box heads." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 222, + 296, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 222, + 296, + 258 + ], + "spans": [ + { + "bbox": [ + 55, + 222, + 296, + 258 + ], + "type": "text", + "content": "Two-Way Transformer. 
Following the SAM design, we first concatenate the 3D bounding box tokens with prompt-related tokens to form the query:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 84, + 267, + 295, + 289 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 267, + 295, + 289 + ], + "spans": [ + { + "bbox": [ + 84, + 267, + 295, + 289 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} = \\left[ \\left[ \\mathbf {T} _ {\\mathrm {3 D}, 1}; \\mathbf {T} _ {\\mathrm {p}, 1} \\right], \\dots , \\left[ \\mathbf {T} _ {\\mathrm {3 D}, N}; \\mathbf {T} _ {\\mathrm {p}, N} \\right] \\right], \\tag {4}", + "image_path": "a0aba3091b7004c399aef614159e49fe21064ededfca3a3e497de229244a8756.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "spans": [ + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\mathbf{T}_{3\\mathrm{D},i}" + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "text", + "content": " denotes the 3D bounding box token for the " + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "text", + "content": "-th object, " + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\mathbf{T}_{\\mathrm{p},i}" + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "text", + "content": " is the prompt-related token, and " + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "inline_equation", + "content": "[\\cdot ;\\cdot ]" + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "text", + "content": " denotes vector concatenation. 
The SAM encoder output " + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_s" + }, + { + "bbox": [ + 55, + 297, + 296, + 357 + ], + "type": "text", + "content": " serves as both key and value for the first Two-Way Transformer layer, yielding:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 102, + 368, + 295, + 381 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 368, + 295, + 381 + ], + "spans": [ + { + "bbox": [ + 102, + 368, + 295, + 381 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {s} ^ {\\prime} = \\text {T w o W a y T r a n s} (\\mathbf {Q}, \\mathbf {F} _ {s}, \\mathbf {F} _ {s}). \\tag {5}", + "image_path": "534cf0be2a897df9ddedd1897bbf20c6bcfde9768fbd7dba638026507cfe94e2.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 392, + 295, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 392, + 295, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 392, + 295, + 415 + ], + "type": "text", + "content": "The initialized parameters of two-way transformer are copied using pre-trained SAM decoder." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 416, + 296, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 416, + 296, + 464 + ], + "spans": [ + { + "bbox": [ + 55, + 416, + 296, + 464 + ], + "type": "text", + "content": "Geometric Transformer. 
We then process the geometric embedding " + }, + { + "bbox": [ + 55, + 416, + 296, + 464 + ], + "type": "inline_equation", + "content": "\\mathbf{G}" + }, + { + "bbox": [ + 55, + 416, + 296, + 464 + ], + "type": "text", + "content": " (from the 2D Aggregator) through the zero-initialized " + }, + { + "bbox": [ + 55, + 416, + 296, + 464 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 55, + 416, + 296, + 464 + ], + "type": "text", + "content": " convolutional layer ZEM and add it to " + }, + { + "bbox": [ + 55, + 416, + 296, + 464 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_s" + }, + { + "bbox": [ + 55, + 416, + 296, + 464 + ], + "type": "text", + "content": " for use as key and value in the Geometric Transformer:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 474, + 295, + 488 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 474, + 295, + 488 + ], + "spans": [ + { + "bbox": [ + 61, + 474, + 295, + 488 + ], + "type": "interline_equation", + "content": "\\mathbf {G} ^ {\\prime} = \\operatorname {G e o T r a n s} (\\mathbf {Q}, \\operatorname {Z E M} (\\mathbf {G}) + \\mathbf {F} _ {s}, \\operatorname {Z E M} (\\mathbf {G}) + \\mathbf {F} _ {s}). \\tag {6}", + "image_path": "d590554d626109a811e1ec333712a38df32a68a522bdcd979922ea03bb43e31d.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 498, + 296, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 296, + 558 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 296, + 558 + ], + "type": "text", + "content": "ZEM integrates the geometric embedding and avoids catastrophic forgetting in 2D features. 
Next, " + }, + { + "bbox": [ + 55, + 498, + 296, + 558 + ], + "type": "inline_equation", + "content": "\\mathbf{G}'" + }, + { + "bbox": [ + 55, + 498, + 296, + 558 + ], + "type": "text", + "content": " is again passed through ZEM and combined with " + }, + { + "bbox": [ + 55, + 498, + 296, + 558 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_s'" + }, + { + "bbox": [ + 55, + 498, + 296, + 558 + ], + "type": "text", + "content": ". This enriched representation is used as key and value in the second Two-Way Transformer layer to generate object features " + }, + { + "bbox": [ + 55, + 498, + 296, + 558 + ], + "type": "inline_equation", + "content": "\\mathbf{O}" + }, + { + "bbox": [ + 55, + 498, + 296, + 558 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 568, + 296, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 568, + 296, + 592 + ], + "spans": [ + { + "bbox": [ + 55, + 568, + 296, + 592 + ], + "type": "interline_equation", + "content": "\\mathbf {O} = \\text {T w o W a y T r a n s} \\left(\\mathbf {Q} ^ {\\prime}, \\operatorname {Z E M} \\left(\\mathbf {G} ^ {\\prime}\\right) + \\mathbf {F} _ {s} ^ {\\prime}, \\operatorname {Z E M} \\left(\\mathbf {G} ^ {\\prime}\\right) + \\mathbf {F} _ {s} ^ {\\prime}\\right). \\tag {7}", + "image_path": "283dc9dacad50292f519d1562292c83f684402753d0b32c7492054f892880085.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 594, + 295, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 295, + 629 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 295, + 629 + ], + "type": "text", + "content": "ZEM also helps stabilize parameter updates in the two-way and geometric transformer training, preventing conflicts arising from diverse 3D object supervision." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "content": "3D Bounding Box Heads. Finally, " + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{O}" + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "content": " is fed into the 3D bounding box heads to calculate the final predictions, which follows typical architectures from standard 3D detection frameworks [8, 66, 80]: " + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{3D}}(x,y,z,w,h,l,R,S)" + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "inline_equation", + "content": "x,y,z" + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "content": " specify the 3D box center, " + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "inline_equation", + "content": "w,h,l" + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "content": " are its dimensions, " + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "content": " is the rotation matrix, and " + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "content": " is the predicted 3D Intersection over Union (IoU) score." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 72, + 359, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 359, + 83 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 359, + 83 + ], + "type": "text", + "content": "3.4. Loss" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "spans": [ + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "text", + "content": "Our loss function comprises three components, the depth loss " + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{depth}}" + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "text", + "content": ", the camera intrinsic loss " + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{cam}}" + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "text", + "content": ", and the detection loss " + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{det}}" + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "text", + "content": ". The overall loss is defined as the sum of these three components. For depth loss " + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{depth}}" + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "text", + "content": ", we adopt the commonly used SILog loss [19, 64] to supervise depth prediction. 
For camera intrinsic loss " + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{cam}}" + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "text", + "content": ", we follow the dense camera ray approach [30, 54] to represent intrinsics and also employ the SILog loss to measure deviations between predicted and ground-truth parameters. At last, for detection loss " + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{det}}" + }, + { + "bbox": [ + 313, + 89, + 555, + 258 + ], + "type": "text", + "content": ", we use the smooth L1 loss [40, 66, 80] to regress 3D bounding boxes parameters and predicted IOU scores and the Chamfer loss [8, 74] for rotation matrices. Detailed formulations of these loss functions can be found in the supplementary material Section 7.3." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 266, + 428, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 266, + 428, + 278 + ], + "spans": [ + { + "bbox": [ + 313, + 266, + 428, + 278 + ], + "type": "text", + "content": "3.5. Prompt Interaction" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 284, + 554, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 284, + 554, + 332 + ], + "spans": [ + { + "bbox": [ + 313, + 284, + 554, + 332 + ], + "type": "text", + "content": "DetAny3D supports point, box, and text prompts to detect 3D box for user-specified objects. To calibrate more precise depth for specific camera, DetAny3D allows users to specify the camera configuration via the intrinsic prompt." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 332, + 553, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 332, + 553, + 403 + ], + "spans": [ + { + "bbox": [ + 313, + 332, + 553, + 403 + ], + "type": "text", + "content": "Box and Point Prompts. Following SAM's methodology, both box and point prompts are encoded based on their respective positions and embeddings. For the box prompt, two points (top-left and bottom-right corners) are used. The point prompt is derived by combining the positional encoding of the point and the corresponding embedding." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 404, + 554, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 404, + 554, + 475 + ], + "spans": [ + { + "bbox": [ + 313, + 404, + 554, + 475 + ], + "type": "text", + "content": "Text Prompts. Recent 2D foundation models like Grounding DINO [44] are able to detect bounding box for the open-vocabulary object specified by users using text prompt. DetAny3D can further generate 3D bounding box using the prediction of Grounding DINO, which enables text as prompts in the zero-shot interface." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 476, + 554, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 476, + 554, + 536 + ], + "spans": [ + { + "bbox": [ + 313, + 476, + 554, + 536 + ], + "type": "text", + "content": "Intrinsic Prompts. Unlike most existing 3D detectors that employ a fixed virtual camera and rely on GT intrinsics to recover the true depth, inspired by Unidepth, we predict intrinsics for camera-aware 3D detection. 
When no intrinsic prompt is given, the model infers intrinsics for outputs:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 346, + 546, + 553, + 561 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 546, + 553, + 561 + ], + "spans": [ + { + "bbox": [ + 346, + 546, + 553, + 561 + ], + "type": "interline_equation", + "content": "\\operatorname {B o x} _ {3 D} = 3 \\text {D I n t e r p r e t o r} (\\mathbf {Q}, \\hat {\\mathbf {G}}, \\mathbf {F} _ {s}), \\tag {8}", + "image_path": "fd094f8bfb8df55679742a9eddbcfc7948e1190818e49b8c16a94435d9a33aab.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 571, + 554, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 571, + 554, + 631 + ], + "spans": [ + { + "bbox": [ + 313, + 571, + 554, + 631 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 571, + 554, + 631 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{G}} = \\{\\mathbf{D}|\\hat{\\mathbf{C}},\\hat{\\mathbf{C}}\\}" + }, + { + "bbox": [ + 313, + 571, + 554, + 631 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 571, + 554, + 631 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{C}}" + }, + { + "bbox": [ + 313, + 571, + 554, + 631 + ], + "type": "text", + "content": " is the predicted camera embedding, and " + }, + { + "bbox": [ + 313, + 571, + 554, + 631 + ], + "type": "inline_equation", + "content": "\\mathbf{D}|\\hat{\\mathbf{C}}" + }, + { + "bbox": [ + 313, + 571, + 554, + 631 + ], + "type": "text", + "content": " is the depth embedding conditioned on the predicted camera embedding. 
When intrinsic prompts are given, the model refines the 3D detection results based on the true intrinsic:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 346, + 642, + 553, + 656 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 642, + 553, + 656 + ], + "spans": [ + { + "bbox": [ + 346, + 642, + 553, + 656 + ], + "type": "interline_equation", + "content": "\\operatorname {B o x} _ {3 D} = 3 \\mathrm {D I n t e r p r e t o r} (\\mathbf {Q}, \\mathbf {G}, \\mathbf {F} _ {s}), \\tag {9}", + "image_path": "41475c4856505009cc46d3f9519fa847c0b260e6d0372b3c269e3559b76f5237.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{G} = \\{\\mathbf{D}|\\mathbf{C},\\mathbf{C}\\}" + }, + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "content": ". This boosts performance on both intrinsic prediction and 3D detection since the model continuously predicts and aligns the intrinsic with the 3D detection rather than estimating it solely from input image." 
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 132, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 132, + 85 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 132, + 85 + ], + "type": "text", + "content": "4. Experiment" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 171, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 171, + 105 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 171, + 105 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 110, + 297, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 110, + 297, + 385 + ], + "spans": [ + { + "bbox": [ + 55, + 110, + 297, + 385 + ], + "type": "text", + "content": "DA3D Benchmark. We present DA3D, a unified 3D detection dataset that aggregates 16 diverse datasets for 3D detection and depth estimation. Building upon Omni3D's original datasets (Hypersim [57], ARKitScenes [4], Objectron [2], SUNRGBD [61], KITTI [24], and nuScenes [9]), we incorporate additional four outdoor detection datasets (Argoverse2 [70], A2D2 [25], Waymo [62], Cityscapes3D [21]), one indoor detection dataset (3RScan [65]), and five depth and intrinsic datasets (Scannet [17], Taskonomy [77], DrivingStereo [71], Middlebury [59], IBIMS-1 [34]). All data is standardized with monocular images, camera intrinsics, 3D bounding boxes, and depth maps. 
Following prior work [74], we select partial categories from KITTI, SUNRGBD, and ARKitScenes as zero-shot test classes. We select Cityscapes3D, Waymo, and 3RScan as our zero-shot datasets with novel camera configurations, where 3RScan also contains novel object categories. Depth supervision from LiDAR, RGB-D, and stereo sensors enhances " + }, + { + "bbox": [ + 55, + 110, + 297, + 385 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 55, + 110, + 297, + 385 + ], + "type": "text", + "content": " of training samples, while intrinsic parameters cover 20 camera configurations across 0.4 million frames " + }, + { + "bbox": [ + 55, + 110, + 297, + 385 + ], + "type": "inline_equation", + "content": "(2.5\\times" + }, + { + "bbox": [ + 55, + 110, + 297, + 385 + ], + "type": "text", + "content": " Omni3D's scale). Dataset statistics and splits are detailed in Supplementary material Section 6. All data are subject to their respective licenses." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 387, + 296, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 387, + 296, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 387, + 296, + 495 + ], + "type": "text", + "content": "Baselines. We choose Cube R-CNN [8] and OV-Mono3D [74] as our primary baselines, as their settings align most closely with our experimental protocol: Cube R-CNN is a benchmark provided by the Omni3D dataset. It is a unified detector capable of performing detection on predefined categories. OVMono3D is a recently available open-vocabulary 3D detector on the Omni3D dataset. It lifts 2D detection to 3D by connecting the open-vocabulary 2D detector Grounding DINO [44] with a detection head." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 497, + 297, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 497, + 297, + 652 + ], + "spans": [ + { + "bbox": [ + 55, + 497, + 297, + 652 + ], + "type": "text", + "content": "Metrics. We adopt the metrics in the Omni3D benchmark [8], which is Average Precision (AP). Predictions are matched to ground-truth by measuring their overlap using IoU3D, which computes the intersection-over-union (IoU) of 3D cuboids. The IoU3D thresholds range from " + }, + { + "bbox": [ + 55, + 497, + 297, + 652 + ], + "type": "inline_equation", + "content": "\\tau \\in [0.05, 0.10, \\dots, 0.50]" + }, + { + "bbox": [ + 55, + 497, + 297, + 652 + ], + "type": "text", + "content": ". For experiments using text prompts, we additionally employ target-aware metrics from OVMono3D [74]: Prompt the detector only with category names present in the per-image annotations instead of providing an exhaustive category list. This addresses severe naming ambiguity (e.g., \"trash can\" vs. \"rubbish bin\") and missing annotation issues prevalent in indoor datasets like 3RScan (see Supplementary material Section 8.)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 654, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 296, + 715 + ], + "type": "text", + "content": "Implementation Details. We implement DetAny3D via PyTorch [53]. We use the pretrained ViT-L DINOv2 [51, 54] and ViT-H SAM [33] as our initial models, with SAM serving as the promptable backbone, where the encoder is frozen during training. 
All main experiments are conducted" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 555, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 205 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 205 + ], + "type": "text", + "content": "using 8 NVIDIA A100 machines with 8 GPUs for each and a batch size of 64. The model is trained for 80 epochs, taking approximately 2 weeks to complete. The training uses the AdamW [47] optimizer with an initial learning rate of 0.0001, adjusted according to the cosine annealing policy [46]. During box prompt training, we apply a 0.1 positional offset disturbance. For point prompt training, points are randomly selected from the mask. Text prompts are converted into box prompts via Grounding DINO SwinT [44]. For fair comparisons, all baseline-related experiments incorporate intrinsic prompts and use aligned prompt inputs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 215, + 399, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 215, + 399, + 227 + ], + "spans": [ + { + "bbox": [ + 313, + 215, + 399, + 227 + ], + "type": "text", + "content": "4.2. Main Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "spans": [ + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": "Zero-shot Category Performance. In this experiment, we use two sources for the prompt input: text prompt processed by Grounding DINO and box prompt from ground-truth 2D bounding box. We evaluate our model on KITTI, SUNRGBD, and ARKitScenes datasets with the same zero-shot categories as OVMono3D [74]. As shown in Table 1 (left), our DetAny3D demonstrates superior zero-shot adaptation performance compared to the OVMono3D baseline. 
When using Grounding DINO for text prompt input, our method achieves significant improvements of " + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "inline_equation", + "content": "21.02\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": " on KITTI, " + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "inline_equation", + "content": "4.29\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": " on SUNRGBD, and " + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "inline_equation", + "content": "11.35\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": " on ARKitScenes under the target-aware metric. When using 2D ground-truth as box prompt input, DetAny3D attains " + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "inline_equation", + "content": "28.96\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": " on KITTI, " + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "inline_equation", + "content": "39.09\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": " on SUNRGBD, and " + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "inline_equation", + "content": "57.72\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": " on ARKitScenes, showing " + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "inline_equation", + "content": "3.4\\times" + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "inline_equation", + "content": "2.3\\times" + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": ", and " + }, + { 
+ "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "inline_equation", + "content": "4.1\\times" + }, + { + "bbox": [ + 313, + 233, + 556, + 449 + ], + "type": "text", + "content": " gains over the baseline, respectively. This substantial performance gap highlights our method's enhanced ability to generalize to novel object categories." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 451, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 451, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 451, + 556, + 715 + ], + "type": "text", + "content": "Zero-shot Camera Performance. To assess robustness against novel camera parameters, we conduct cross-dataset evaluation as shown in Table 1 (right). For Cityscapes3D and Waymo, We use Cube R-CNN's 2D detections and ground-truth as box prompt and Grounding DINO processed text prompt for comparison. For 3RScan, due to namespace inconsistency with Cube R-CNN's predefined categories and the presence of novel classes, we only use text prompt and ground-truth box prompts, benchmarking against OVMono3D. DetAny3D exhibits strong adaptation to unseen camera configurations. When using Cube R-CNN-aligned prompts, our model achieves " + }, + { + "bbox": [ + 313, + 451, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 451, + 556, + 715 + ], + "type": "text", + "content": " scores of 10.33 and 15.17 on Cityscapes3D and Waymo, respectively, surpassing Cube R-CNN by +2.11 and +5.74. With text prompts, under identical settings as OVMono3D [74], our method improves " + }, + { + "bbox": [ + 313, + 451, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 451, + 556, + 715 + ], + "type": "text", + "content": " by +4.73 on Cityscapes3D, +5.68 on Waymo, and +1.1 on 3RScan under target-aware metrics. 
Both models show low scores on conventional metrics for 3RScan due to severe naming ambiguity and missing annotations. Using 2D ground-truth as box prompts, DetAny3D attains " + }, + { + "bbox": [ + 313, + 451, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 451, + 556, + 715 + ], + "type": "text", + "content": " of 16.88, 15.83, and 21.36 across the three datasets, outperforming OVMono3D by +6.82, +5.6," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 105, + 553, + 266 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 555, + 104 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 555, + 104 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 555, + 104 + ], + "type": "text", + "content": "Table 1. Zero-shot 3D detection performance comparison on novel categories (left) and novel cameras (right). Results report " + }, + { + "bbox": [ + 55, + 70, + 555, + 104 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{\\mathrm{3D}}" + }, + { + "bbox": [ + 55, + 70, + 555, + 104 + ], + "type": "text", + "content": " with different prompt strategies: (1) Cube R-CNN, (2) Grounding DINO outputs (traditional metric / target-aware metric) and (3) Ground Truth. Target-aware metric uses per-image existing categories for prompting." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 105, + 553, + 266 + ], + "lines": [ + { + "bbox": [ + 58, + 105, + 553, + 266 + ], + "spans": [ + { + "bbox": [ + 58, + 105, + 553, + 266 + ], + "type": "table", + "html": "
PromptMethodNovel CategoriesNovel Cameras
APkit3DAPsun3DAPpark3DAPcity3DAPwym3DAP3rs3D
-Cube R-CNN [8]---8.229.43-
Cube R-CNNOVMono3D [74]---4.9710.89-
DetAny3D (ours)---10.3315.17-
Δ---+5.36+4.28-
Grounding DINOOVMono3D [74]4.71 / 4.714.07 / 16.7813.21 / 13.215.88 / 10.989.20 / 10.270.37 / 8.48
DetAny3D (ours)25.73 / 25.737.63 / 21.0724.56 / 24.5611.05 / 15.7115.38 / 15.950.65 / 9.58
Δ+21.02 / +21.02+3.56 / +4.29+11.35 / +11.35+5.17 / +4.73+6.18 / +5.68+0.28 / +1.10
Ground TruthOVMono3D [74]8.4417.1614.1210.0610.2318.05
DetAny3D (ours)28.9639.0957.7216.8815.8321.36
Δ+20.52+21.93+43.60+6.82+5.60+3.31
", + "image_path": "446c14e45132adeb8fa7de90a1ee31c1dd609b2e2e31f19f391ca1a5296821f4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 58, + 303, + 553, + 423 + ], + "blocks": [ + { + "bbox": [ + 55, + 269, + 555, + 301 + ], + "lines": [ + { + "bbox": [ + 55, + 269, + 555, + 301 + ], + "spans": [ + { + "bbox": [ + 55, + 269, + 555, + 301 + ], + "type": "text", + "content": "Table 2. In-domain performance comparison between DetAny3D and baselines. The first three columns show results trained only on NuScenes and KITTI, while the next seven columns show results trained on the unified dataset. Two prompt sources are used: (1) Cube R-CNN 2D detections, (2) Ground Truth." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 303, + 553, + 423 + ], + "lines": [ + { + "bbox": [ + 58, + 303, + 553, + 423 + ], + "spans": [ + { + "bbox": [ + 58, + 303, + 553, + 423 + ], + "type": "table", + "html": "
MethodOmni3D_OUTOMni3D
APkit3D↑APnus3D↑APout3D↑APkit3D↑APnus3D↑APsun3D↑APark3D↑APobj3D↑APhyp3D↑AP3D↑
ImVoxelNet [58]23.523.421.5------9.4
SMOKE [45]25.920.420.0------10.4
OV-Uni3DETR [68]35.133.031.6-------
Cube R-CNN [8]36.032.731.932.5030.0615.3341.7350.847.4823.26
OVMono3D [74]w/Cube RCNN---25.4524.3315.2041.6058.877.7522.98
DetAny3D (ours)w/Cube RCNN35.833.932.231.6130.9718.9646.1354.427.1724.92
OVMono3D [74]w/Ground Truth---33.6923.7927.8340.8556.6411.9925.32
DetAny3D (ours)w/Ground Truth38.036.735.938.6837.5546.1450.6256.8215.9834.38
", + "image_path": "d47015f2b7753c66309425332a31d6c53d79bb5088b122a1302179d821678c08.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 434, + 295, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 434, + 295, + 470 + ], + "spans": [ + { + "bbox": [ + 55, + 434, + 295, + 470 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 55, + 434, + 295, + 470 + ], + "type": "inline_equation", + "content": "+3.31" + }, + { + "bbox": [ + 55, + 434, + 295, + 470 + ], + "type": "text", + "content": ", respectively. These results highlight the effectiveness of our architecture and its potential for real-world applications with arbitrary camera configurations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 471, + 295, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 471, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 55, + 471, + 295, + 590 + ], + "type": "text", + "content": "In-domain Performance We also evaluate our model's in-domain detection capability using two prompt sources: 2D detections from Cube R-CNN and 2D ground-truth. Besides the unified model, we also train DetAny3D on Omni3D_out for comparison. As shown in Table 2, DetAny3D achieves competitive results with Cube R-CNN when provided with aligned input. Using GT prompts, DetAny3D outperforms OVMono3D by " + }, + { + "bbox": [ + 55, + 471, + 295, + 590 + ], + "type": "inline_equation", + "content": "9.06\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 55, + 471, + 295, + 590 + ], + "type": "text", + "content": ", indicating that Cube R-CNN may bottleneck performance, and stronger 2D prompts could further boost results." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 599, + 241, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 599, + 241, + 612 + ], + "spans": [ + { + "bbox": [ + 55, + 599, + 241, + 612 + ], + "type": "text", + "content": "4.3. Possible Applications of DetAny3D" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 616, + 295, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 616, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 55, + 616, + 295, + 665 + ], + "type": "text", + "content": "Other than robustly detecting diverse corner cases in real-world tasks such as autonomous driving and embodied perception, DetAny3D's open-world detection results can further serve as inputs for advanced downstream tasks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "content": "3D Bounding Box Guided Video Generation. We feed DetAny3D outputs into Sora for zero-shot, open-world 3D box guided video generation. As shown in Figure 3, we compare: (i) image + 3D box + text, (ii) image + 2D box +" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 316, + 437, + 555, + 626 + ], + "blocks": [ + { + "bbox": [ + 316, + 437, + 555, + 626 + ], + "lines": [ + { + "bbox": [ + 316, + 437, + 555, + 626 + ], + "spans": [ + { + "bbox": [ + 316, + 437, + 555, + 626 + ], + "type": "image", + "image_path": "ed02c5299eea3f9d7fbc6b3c62c743a443009cb116b41fb478d5e440aac3c5ac.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 632, + 555, + 698 + ], + "lines": [ + { + "bbox": [ + 313, + 632, + 555, + 698 + ], + "spans": [ + { + "bbox": [ + 313, + 632, + 555, + 698 + ], + "type": "text", + "content": "Figure 3. 
Zero-Shot Transfer Video Generation via Sora. We provide Sora with Internet-sourced images. As shown, when controlled with 3D bounding box, Sora can better capture the scene's geometric relationships. In contrast, with only controlled by 2D bounding box prompt, Sora respects pixel-level spatial cues but fails to generate accurate geometric offset." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 70, + 218, + 178 + ], + "blocks": [ + { + "bbox": [ + 66, + 70, + 218, + 178 + ], + "lines": [ + { + "bbox": [ + 66, + 70, + 218, + 178 + ], + "spans": [ + { + "bbox": [ + 66, + 70, + 218, + 178 + ], + "type": "image", + "image_path": "6a784baaa6ef36e1634481d27759d2e17f84374a49fb217170a3a5e045d2c4ef.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 228, + 71, + 380, + 178 + ], + "blocks": [ + { + "bbox": [ + 228, + 71, + 380, + 178 + ], + "lines": [ + { + "bbox": [ + 228, + 71, + 380, + 178 + ], + "spans": [ + { + "bbox": [ + 228, + 71, + 380, + 178 + ], + "type": "image", + "image_path": "8e233f369a30d7442cf51e6289fad6a16356f81be236d69265f5e593d7e2a865.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 390, + 72, + 543, + 178 + ], + "blocks": [ + { + "bbox": [ + 390, + 72, + 543, + 178 + ], + "lines": [ + { + "bbox": [ + 390, + 72, + 543, + 178 + ], + "spans": [ + { + "bbox": [ + 390, + 72, + 543, + 178 + ], + "type": "image", 
+ "image_path": "84b06d4981e3cbde7098dfcd8a1289b9dea2d415eaa35bc9e8fb96624d9892b4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 66, + 180, + 543, + 289 + ], + "blocks": [ + { + "bbox": [ + 66, + 180, + 543, + 289 + ], + "lines": [ + { + "bbox": [ + 66, + 180, + 543, + 289 + ], + "spans": [ + { + "bbox": [ + 66, + 180, + 543, + 289 + ], + "type": "image", + "image_path": "163cf4992709a31605b860aa94bd7ad353ee5991161c95956f4b52deca9d5aaa.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 298, + 555, + 332 + ], + "lines": [ + { + "bbox": [ + 54, + 298, + 555, + 332 + ], + "spans": [ + { + "bbox": [ + 54, + 298, + 555, + 332 + ], + "type": "text", + "content": "Figure 4. Qualitative Results. We present qualitative examples from open-world detection. In each pair of images, the top row is produced by OVMono3D, and the bottom row by DetAny3D. For each example, the left sub-figure overlays the projected 3D bounding boxes, while the right sub-figure shows the corresponding bird's-eye view with " + }, + { + "bbox": [ + 54, + 298, + 555, + 332 + ], + "type": "inline_equation", + "content": "1\\mathrm{m} \\times 1\\mathrm{m}" + }, + { + "bbox": [ + 54, + 298, + 555, + 332 + ], + "type": "text", + "content": " grids as the background." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 58, + 398, + 296, + 473 + ], + "blocks": [ + { + "bbox": [ + 55, + 345, + 295, + 390 + ], + "lines": [ + { + "bbox": [ + 55, + 345, + 295, + 390 + ], + "spans": [ + { + "bbox": [ + 55, + 345, + 295, + 390 + ], + "type": "text", + "content": "Table 3. Ablation study of DetAny3D. 
The table shows the impact of different design choices on " + }, + { + "bbox": [ + 55, + 345, + 295, + 390 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{\\mathrm{3D}}" + }, + { + "bbox": [ + 55, + 345, + 295, + 390 + ], + "type": "text", + "content": " performance. Each component is progressively added. To save resources, ablations are conducted on " + }, + { + "bbox": [ + 55, + 345, + 295, + 390 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 55, + 345, + 295, + 390 + ], + "type": "text", + "content": " of the full training dataset." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 398, + 296, + 473 + ], + "lines": [ + { + "bbox": [ + 58, + 398, + 296, + 473 + ], + "spans": [ + { + "bbox": [ + 58, + 398, + 296, + 473 + ], + "type": "table", + "html": "
Depth&Cam.Merge DINO2D Agg.ZEMAP3D ↑
----5.81
---10.10
--20.20
-23.21
25.80
", + "image_path": "8e4047d9fac1f5250a5333356177ee2f9ce5fdb7d68739d4655955b268b10c54.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 481, + 295, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 481, + 295, + 506 + ], + "spans": [ + { + "bbox": [ + 55, + 481, + 295, + 506 + ], + "type": "text", + "content": "text, and (iii) image + text. With 3D box constraints, Sora generates videos better aligned with intent." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 514, + 156, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 514, + 156, + 525 + ], + "spans": [ + { + "bbox": [ + 55, + 514, + 156, + 525 + ], + "type": "text", + "content": "4.4. Ablation Studies" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 531, + 295, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 531, + 295, + 602 + ], + "spans": [ + { + "bbox": [ + 55, + 531, + 295, + 602 + ], + "type": "text", + "content": "As shown in Table 3, we ablate key components of DetAny3D, showing the evolution from a SAM-based baseline to DetAny3D with strong 3D generalization. The base model extends SAM with 3D box tokens and a 3D head for direct box prediction. Additional ablations, including backbone and prompt types, are in Supplementary Section 9." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 605, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 55, + 605, + 295, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 295, + 676 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 295, + 676 + ], + "type": "text", + "content": "- Effectiveness of Depth & Camera Modules. Depth map provides denser supervision, while camera configuration intrinsic help mitigate disruptions caused by multiple datasets training. 
Integrating both depth map and camera intrinsic yields improvement in 3D feature extraction and generalization across diverse datasets." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "content": "- Effectiveness of Merging Depth-Pretrained DINO. Incorporating depth-pretrained DINO yields remarkable improvements, demonstrating that the rich geometric in" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 322, + 346, + 555, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 346, + 555, + 370 + ], + "spans": [ + { + "bbox": [ + 322, + 346, + 555, + 370 + ], + "type": "text", + "content": "formation from DINO effectively compensates for SAM's limited geometric understanding." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 371, + 555, + 490 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 314, + 371, + 555, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 371, + 555, + 430 + ], + "spans": [ + { + "bbox": [ + 314, + 371, + 555, + 430 + ], + "type": "text", + "content": "- Effectiveness of 2D Aggregator. Compared to directly adding the features from two models, the 2D Aggregator reduces conflicts between different foundation models, further unleashing the performance gains from two foundation model integration." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 430, + 554, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 430, + 554, + 490 + ], + "spans": [ + { + "bbox": [ + 314, + 430, + 554, + 490 + ], + "type": "text", + "content": "- Effectiveness of ZEM. 
ZEM mechanism integrates geometric features through zero-initialized layers, which enables stable 2D-to-3D knowledge transfer during training across datasets with diverse camera parameters, scenes, and depth distributions." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 498, + 425, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 425, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 425, + 510 + ], + "type": "text", + "content": "4.5. Qualitative Results" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 515, + 555, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 515, + 555, + 563 + ], + "spans": [ + { + "bbox": [ + 313, + 515, + 555, + 563 + ], + "type": "text", + "content": "We provide qualitative comparisons with OVMono3D. As shown in Figure 4, our model predicts more accurate intrinsics when the camera parameters are unknown and infers more consistent camera poses and 3D detections." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 574, + 391, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 574, + 391, + 586 + ], + "spans": [ + { + "bbox": [ + 313, + 574, + 391, + 586 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 594, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 555, + 713 + ], + "type": "text", + "content": "We propose DetAny3D, a promptable 3D detection foundation model that can detect arbitrary 3D objects from any monocular image input. DetAny3D exhibits significant zero-shot detection capabilities across diverse domains and effective zero-shot transfer across various tasks, highlighting its suitability for real-world deployment in dynamic and unstructured environments. 
Moreover, its flexible and robust detection ability opens the door to gathering large-scale, multi-source data for more 3D perception-guided tasks, paving the way toward open-world systems." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 158, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 158, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 158, + 85 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 297, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 297, + 236 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 297, + 236 + ], + "type": "text", + "content": "We sincerely thank Jiazhi Yang, Tianyu Li, Haochen Tian, Jisong Cai, and Li Chen for their invaluable discussions and constructive feedback throughout this project. Their insights and expertise have contributed significantly to the success of this work. We also appreciate the continuous support and encouragement from all the members of OpenDriveLab. This work is supported by the National Key Research and Development Program of China (2024YFE0210700), the National Natural Science Foundation of China (NSFC) under Grants 62206172 and 62432008, and the Shanghai Artificial Intelligence Laboratory. It is also partially funded by Meituan Inc." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 249, + 115, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 249, + 115, + 262 + ], + "spans": [ + { + "bbox": [ + 56, + 249, + 115, + 262 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 269, + 295, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 61, + 269, + 294, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 269, + 294, + 323 + ], + "spans": [ + { + "bbox": [ + 61, + 269, + 294, + 323 + ], + "type": "text", + "content": "[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 326, + 295, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 326, + 295, + 370 + ], + "spans": [ + { + "bbox": [ + 61, + 326, + 295, + 370 + ], + "type": "text", + "content": "[2] Adel Ahmadyan, Liangkai Zhang, Artsiom Ablavatski, Jianing Wei, and Matthias Grundmann. Objectron: A large scale dataset of object-centric videos in the wild with pose annotations. In CVPR, 2021. 6, 13" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 372, + 295, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 372, + 295, + 404 + ], + "spans": [ + { + "bbox": [ + 62, + 372, + 295, + 404 + ], + "type": "text", + "content": "[3] Umar Asif, Jianbin Tang, and Stefan Harrer. Graspnet: An efficient convolutional neural network for real-time grasp detection for low-powered devices. In IJCAI, 2018. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 407, + 294, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 407, + 294, + 461 + ], + "spans": [ + { + "bbox": [ + 62, + 407, + 294, + 461 + ], + "type": "text", + "content": "[4] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Yuri Feigin, Peter Fu, Thomas Gebauer, Daniel Kurz, Tal Dimry, Brandon Joffe, Arik Schwartz, et al. Arkitsscenes: A diverse real-world dataset for 3d indoor scene understanding using mobile rgb-d data. In NeurIPS Datasets, 2021. 6, 13" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 464, + 294, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 464, + 294, + 518 + ], + "spans": [ + { + "bbox": [ + 62, + 464, + 294, + 518 + ], + "type": "text", + "content": "[5] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024.3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 520, + 294, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 520, + 294, + 553 + ], + "spans": [ + { + "bbox": [ + 62, + 520, + 294, + 553 + ], + "type": "text", + "content": "[6] Georg Biegelbauer and Markus Vincze. Efficient 3d object detection by fitting superquadrics to range image data for robot's object manipulation. In ICRA, 2007. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 555, + 294, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 555, + 294, + 609 + ], + "spans": [ + { + "bbox": [ + 62, + 555, + 294, + 609 + ], + "type": "text", + "content": "[7] Aleksei Bochkovskii, Amaël Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv preprint arXiv:2410.02073, 2024. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 613, + 294, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 613, + 294, + 656 + ], + "spans": [ + { + "bbox": [ + 62, + 613, + 294, + 656 + ], + "type": "text", + "content": "[8] Garrick Brazil, Abhinav Kumar, Julian Straub, Nikhila Ravi, Justin Johnson, and Georgia Gkioxari. Omni3d: A large benchmark and model for 3d object detection in the wild. In CVPR, 2023. 2, 3, 5, 6, 7, 13" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 658, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 658, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 62, + 658, + 294, + 712 + ], + "type": "text", + "content": "[9] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In CVPR, 2020. 3, 6, 13" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "type": "text", + "content": "[10] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In ICCV, 2021. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 119, + 553, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 119, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 317, + 119, + 553, + 150 + ], + "type": "text", + "content": "[11] Sergio Casas, Abbas Sadat, and Raquel Urtasun. Mp3: A unified model to map, perceive, predict and plan. In CVPR, 2021. 
2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 153, + 553, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 153, + 553, + 186 + ], + "spans": [ + { + "bbox": [ + 316, + 153, + 553, + 186 + ], + "type": "text", + "content": "[12] Li Chen, Penghao Wu, Kashyap Chitta, Bernhard Jaeger, Andreas Geiger, and Hongyang Li. End-to-end autonomous driving: Challenges and frontiers. IEEE TPAMI, 2024. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 188, + 553, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 188, + 553, + 232 + ], + "spans": [ + { + "bbox": [ + 317, + 188, + 553, + 232 + ], + "type": "text", + "content": "[13] Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In CVPR, 2024. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 234, + 553, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 234, + 553, + 266 + ], + "spans": [ + { + "bbox": [ + 317, + 234, + 553, + 266 + ], + "type": "text", + "content": "[14] Xiaozhi Chen, Kaustav Kundu, Ziyu Zhang, Huimin Ma, Sanja Fidler, and Raquel Urtasun. Monocular 3d object detection for autonomous driving. In CVPR, 2016. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 269, + 553, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 269, + 553, + 300 + ], + "spans": [ + { + "bbox": [ + 317, + 269, + 553, + 300 + ], + "type": "text", + "content": "[15] Xiaozhi Chen, Huimin Ma, Ji Wan, Bo Li, and Tian Xia. Multi-view 3d object detection network for autonomous driving. In CVPR, 2017. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 303, + 553, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 303, + 553, + 335 + ], + "spans": [ + { + "bbox": [ + 316, + 303, + 553, + 335 + ], + "type": "text", + "content": "[16] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. In ICLR, 2023. 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 338, + 553, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 338, + 553, + 380 + ], + "spans": [ + { + "bbox": [ + 317, + 338, + 553, + 380 + ], + "type": "text", + "content": "[17] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In CVPR, 2017. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 383, + 553, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 383, + 553, + 415 + ], + "spans": [ + { + "bbox": [ + 316, + 383, + 553, + 415 + ], + "type": "text", + "content": "[18] Saumitro Dasgupta, Kuan Fang, Kevin Chen, and Silvio Savarese. Delay: Robust spatial layout estimation for cluttered indoor scenes. In CVPR, 2016. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 418, + 553, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 418, + 553, + 450 + ], + "spans": [ + { + "bbox": [ + 316, + 418, + 553, + 450 + ], + "type": "text", + "content": "[19] David Eigen, Christian Puhrsch, and Rob Fergus. Depth map prediction from a single image using a multi-scale deep network. In NeurIPS, 2014. 
5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 453, + 553, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 453, + 553, + 485 + ], + "spans": [ + { + "bbox": [ + 316, + 453, + 553, + 485 + ], + "type": "text", + "content": "[20] Hao-Shu Fang, Chenxi Wang, Minghao Gou, and Cewu Lu. Graspnet-1billion: A large-scale benchmark for general object grasping. In CVPR, 2020. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 487, + 553, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 487, + 553, + 529 + ], + "spans": [ + { + "bbox": [ + 317, + 487, + 553, + 529 + ], + "type": "text", + "content": "[21] Nils Gählert, Nicolas Jourdan, Marius Cordts, Uwe Franke, and Joachim Denzler. Cityscapes 3d: Dataset and benchmark for 9 dof vehicle detection. arXiv preprint arXiv:2006.07864, 2020. 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 533, + 553, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 533, + 553, + 575 + ], + "spans": [ + { + "bbox": [ + 317, + 533, + 553, + 575 + ], + "type": "text", + "content": "[22] Ruiyuan Gao, Kai Chen, Enze Xie, HONG Lanqing, Zhenguo Li, Dit-Yan Yeung, and Qiang Xu. Magicdrive: Street view generation with diverse 3d geometry control. In ICLR, 2023. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 578, + 553, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 578, + 553, + 621 + ], + "spans": [ + { + "bbox": [ + 317, + 578, + 553, + 621 + ], + "type": "text", + "content": "[23] Ruiyuan Gao, Kai Chen, Zhihao Li, Lanqing Hong, Zhenguo Li, and Qiang Xu. Magicdrive3d: Controllable 3d generation for any-view rendering in street scenes. arXiv preprint arXiv:2405.14475, 2024. 
2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 624, + 553, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 624, + 553, + 656 + ], + "spans": [ + { + "bbox": [ + 316, + 624, + 553, + 656 + ], + "type": "text", + "content": "[24] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. *IJRR*, 2013. 3, 6, 13" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 658, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 658, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 658, + 553, + 713 + ], + "type": "text", + "content": "[25] Jakob Geyer, Yohannes Kassahun, Mentor Mahmudi, Xavier Ricou, Rupesh Durgesh, Andrew S Chung, Lorenz Hauswald, Viet Hoang Pham, Maximilian Mühlegg, Sebastian Dorn, et al. A2d2: Audi autonomous driving dataset. arXiv preprint arXiv:2004.06320, 2020. 6" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 297, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 297, + 127 + ], + "type": "text", + "content": "[26] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. 
arXiv preprint arXiv:2401.14196, 2024. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 129, + 296, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 129, + 296, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 296, + 194 + ], + "type": "text", + "content": "[27] Ziyu Guo*, Renrui Zhang*, Xiangyang Zhu, Yiwen Tang, Xianzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 196, + 296, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 196, + 296, + 250 + ], + "spans": [ + { + "bbox": [ + 56, + 196, + 296, + 250 + ], + "type": "text", + "content": "[28] Ziyu Guo*, Renrui Zhang*#, Xiangyang Zhu, Chengzhuo Tong, Peng Gao, Chunyuan Li, and Pheng-Ann Heng. Sam2point: Segment any 3d as videos in zero-shot and promptable manners. arXiv preprint arXiv:2408.16768, 2024.3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 252, + 296, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 252, + 296, + 306 + ], + "spans": [ + { + "bbox": [ + 56, + 252, + 296, + 306 + ], + "type": "text", + "content": "[29] Ziyu Guo, Renrui Zhang, Chengzhuo Tong, Zhizheng Zhao, Peng Gao, Hongsheng Li, and Pheng-Ann Heng. Can we generate images with cot? let's verify and reinforce image generation step by step. arXiv preprint arXiv:2501.13926, 2025.3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 308, + 296, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 308, + 296, + 352 + ], + "spans": [ + { + "bbox": [ + 56, + 308, + 296, + 352 + ], + "type": "text", + "content": "[30] Xiankang He, Guangkai Xu, Bo Zhang, Hao Chen, Ying Cui, and Dongyan Guo. 
Diffcalib: Reformulating monocular camera calibration as diffusion-based dense incident map generation. arXiv preprint arXiv: 2405.15619, 2024. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 353, + 296, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 353, + 296, + 396 + ], + "spans": [ + { + "bbox": [ + 56, + 353, + 296, + 396 + ], + "type": "text", + "content": "[31] Yihan Hu, Jiazhi Yang, Li Chen, Keyu Li, Chonghao Sima, Xizhou Zhu, Siqi Chai, Senyao Du, Tianwei Lin, Wenhai Wang, et al. Planning-oriented autonomous driving. In CVPR, 2023. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 397, + 296, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 397, + 296, + 441 + ], + "spans": [ + { + "bbox": [ + 56, + 397, + 296, + 441 + ], + "type": "text", + "content": "[32] Jin-Cheng Jhang, Tao Tu, Fu-En Wang, Ke Zhang, Min Sun, and Cheng-Hao Kuo. V-mind: Building versatile monocular indoor 3d detector with diverse 2d annotations. In WACV, 2025. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 443, + 296, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 443, + 296, + 487 + ], + "spans": [ + { + "bbox": [ + 56, + 443, + 296, + 487 + ], + "type": "text", + "content": "[33] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In ICCV, 2023. 2, 3, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 488, + 296, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 488, + 296, + 520 + ], + "spans": [ + { + "bbox": [ + 56, + 488, + 296, + 520 + ], + "type": "text", + "content": "[34] Tobias Koch, Lukas Liebel, Friedrich Fraundorfer, and Marco Korner. Evaluation of cnn-based single-image depth estimation methods. In ECCVW, 2018. 
6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 522, + 296, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 522, + 296, + 565 + ], + "spans": [ + { + "bbox": [ + 56, + 522, + 296, + 565 + ], + "type": "text", + "content": "[35] Maksim Kolodiazhnyi, Anna Vorontsova, Matvey Skripkin, Danila Rukhovich, and Anton Konushin. Unidet3d: Multi-dataset indoor 3d object detection. arXiv preprint arXiv:2409.04234, 2024. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 567, + 296, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 567, + 296, + 600 + ], + "spans": [ + { + "bbox": [ + 56, + 567, + 296, + 600 + ], + "type": "text", + "content": "[36] Buyu Li, Wanli Ouyang, Lu Sheng, Xingyu Zeng, and Xiaogang Wang. Gs3d: An efficient 3d object detection framework for autonomous driving. In CVPR, 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 601, + 296, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 601, + 296, + 645 + ], + "spans": [ + { + "bbox": [ + 56, + 601, + 296, + 645 + ], + "type": "text", + "content": "[37] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In CVPR, 2022. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 647, + 296, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 296, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 296, + 689 + ], + "type": "text", + "content": "[38] Xiaofan Li, Yifu Zhang, and Xiaoqing Ye. Drivingdiffusion: Layout-guided multi-view driving scenarios video generation with latent diffusion model. In European Conference on Computer Vision, 2024. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 691, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 296, + 713 + ], + "type": "text", + "content": "[39] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Qiao Yu, and Jifeng Dai. Bevformer:" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "text", + "content": "learning bird's-eye-view representation from lidar-camera via spatiotemporal transformers. IEEE TPAMI, 2024. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 97, + 553, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 97, + 553, + 129 + ], + "spans": [ + { + "bbox": [ + 316, + 97, + 553, + 129 + ], + "type": "text", + "content": "[40] Zhuoling Li, Xiaogang Xu, SerNam Lim, and Hengshuang Zhao. Unimode: Unified monocular 3d object detection. In CVPR, 2024. 2, 3, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 131, + 553, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 131, + 553, + 175 + ], + "spans": [ + { + "bbox": [ + 316, + 131, + 553, + 175 + ], + "type": "text", + "content": "[41] Tingting Liang, Hongwei Xie, Kaicheng Yu, Zhongyu Xia, Zhiwei Lin, Yongtao Wang, Tao Tang, Bing Wang, and Zhi Tang. Bevfusion: A simple and robust lidar-camera fusion framework. In NeurIPS, 2022. 
3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 177, + 553, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 177, + 553, + 220 + ], + "spans": [ + { + "bbox": [ + 316, + 177, + 553, + 220 + ], + "type": "text", + "content": "[42] Xuewu Lin, Tianwei Lin, Zixiang Pei, Lichao Huang, and Zhizhong Su. Sparse4d: Multi-view 3d object detection with sparse spatial-temporal fusion. arXiv preprint arXiv:2211.10581, 2022. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 223, + 553, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 223, + 553, + 255 + ], + "spans": [ + { + "bbox": [ + 316, + 223, + 553, + 255 + ], + "type": "text", + "content": "[43] Luyang Liu, Hongyu Li, and Marco Gruteser. Edge assisted real-time object detection for mobile augmented reality. In MobiCom, 2019. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 257, + 553, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 257, + 553, + 311 + ], + "spans": [ + { + "bbox": [ + 316, + 257, + 553, + 311 + ], + "type": "text", + "content": "[44] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In ECCV, 2024. 2, 3, 5, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 313, + 553, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 313, + 553, + 346 + ], + "spans": [ + { + "bbox": [ + 316, + 313, + 553, + 346 + ], + "type": "text", + "content": "[45] Zechen Liu, Zizhang Wu, and Roland Tóth. Smoke: Single-stage monocular 3d object detection via keypoint estimation. In CVPRW, 2020. 
3, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 348, + 553, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 348, + 553, + 380 + ], + "spans": [ + { + "bbox": [ + 316, + 348, + 553, + 380 + ], + "type": "text", + "content": "[46] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 383, + 553, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 383, + 553, + 405 + ], + "spans": [ + { + "bbox": [ + 316, + 383, + 553, + 405 + ], + "type": "text", + "content": "[47] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 407, + 553, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 407, + 553, + 440 + ], + "spans": [ + { + "bbox": [ + 316, + 407, + 553, + 440 + ], + "type": "text", + "content": "[48] Xinzhu Ma, Wanli Ouyang, Andrea Simonelli, and Elisa Ricci. 3d object detection from images for autonomous driving: a survey. IEEE TPAMI, 2023. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 441, + 553, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 441, + 553, + 474 + ], + "spans": [ + { + "bbox": [ + 316, + 441, + 553, + 474 + ], + "type": "text", + "content": "[49] Jiageng Mao, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. 3d object detection for autonomous driving: A comprehensive survey. IJCV, 2023. 
2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 475, + 553, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 475, + 553, + 520 + ], + "spans": [ + { + "bbox": [ + 316, + 475, + 553, + 520 + ], + "type": "text", + "content": "[50] Yinyu Nie, Xiaoguang Han, Shihui Guo, Yujuan Zheng, Jian Chang, and Jian Jun Zhang. Total3dunderstanding: Joint layout, object pose and mesh reconstruction for indoor scenes from a single image. In CVPR, 2020. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 521, + 553, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 521, + 553, + 576 + ], + "spans": [ + { + "bbox": [ + 316, + 521, + 553, + 576 + ], + "type": "text", + "content": "[51] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. TMLR, 2024. 2, 3, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 578, + 553, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 578, + 553, + 610 + ], + "spans": [ + { + "bbox": [ + 316, + 578, + 553, + 610 + ], + "type": "text", + "content": "[52] Youngmin Park, Vincent Lepetit, and Woontack Woo. Multiple 3d object tracking for augmented reality. In ISMAR, 2008. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 613, + 553, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 613, + 553, + 667 + ], + "spans": [ + { + "bbox": [ + 316, + 613, + 553, + 667 + ], + "type": "text", + "content": "[53] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In NeurIPS, 2019. 
6" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "type": "text", + "content": "[54] Luigi Piccinelli, Yung-Hsu Yang, Christos Sakaridis, Mattia Segu, Siyuan Li, Luc Van Gool, and Fisher Yu. Unidepth: Universal monocular metric depth estimation. In CVPR, 2024. 2, 3, 4, 5, 6, 14" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "text", + "content": "[55] Zhangyang Qi, Zhixiong Zhang, Ye Fang, Jiaqi Wang, and Hengshuang Zhao. Gpt4scene: Understand 3d scenes from videos with vision-language models. arXiv preprint arXiv:2501.01428, 2025. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 172 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 172 + ], + "type": "text", + "content": "[56] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 
2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 174, + 294, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 174, + 294, + 228 + ], + "spans": [ + { + "bbox": [ + 56, + 174, + 294, + 228 + ], + "type": "text", + "content": "[57] Mike Roberts, Jason Ramapuram, Anurag Ranjan, Atulit Kumar, Miguel Angel Bautista, Nathan Paczan, Russ Webb, and Joshua M Susskind. Hypersim: A photorealistic synthetic dataset for holistic indoor scene understanding. In ICCV, 2021. 6, 13" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 230, + 294, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 230, + 294, + 273 + ], + "spans": [ + { + "bbox": [ + 56, + 230, + 294, + 273 + ], + "type": "text", + "content": "[58] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In WACV, 2022. 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 275, + 294, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 275, + 294, + 308 + ], + "spans": [ + { + "bbox": [ + 56, + 275, + 294, + 308 + ], + "type": "text", + "content": "[59] Daniel Scharstein and Richard Szeliski. A taxonomy and evaluation of dense two-frame stereo correspondence algorithms. IJCV, 2002. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 309, + 294, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 309, + 294, + 353 + ], + "spans": [ + { + "bbox": [ + 56, + 309, + 294, + 353 + ], + "type": "text", + "content": "[60] Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens Beiwenger, Ping Luo, Andreas Geiger, and Hongyang Li. Drivelm: Driving with graph visual question answering. In ECCV, 2024. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 354, + 294, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 354, + 294, + 387 + ], + "spans": [ + { + "bbox": [ + 56, + 354, + 294, + 387 + ], + "type": "text", + "content": "[61] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In CVPR, 2015. 6, 13" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 388, + 294, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 388, + 294, + 442 + ], + "spans": [ + { + "bbox": [ + 56, + 388, + 294, + 442 + ], + "type": "text", + "content": "[62] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In CVPR, 2020. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 445, + 294, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 445, + 294, + 498 + ], + "spans": [ + { + "bbox": [ + 56, + 445, + 294, + 498 + ], + "type": "text", + "content": "[63] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 500, + 294, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 500, + 294, + 544 + ], + "spans": [ + { + "bbox": [ + 56, + 500, + 294, + 544 + ], + "type": "text", + "content": "[64] Benjamin Ummenhofer, Huizhong Zhou, Jonas Uhrig, Nikolaus Mayer, Eddy Ilg, Alexey Dosovitskiy, and Thomas Brox. Demon: Depth and motion network for learning monocular stereo. In CVPR, 2017. 
5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 545, + 294, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 545, + 294, + 588 + ], + "spans": [ + { + "bbox": [ + 56, + 545, + 294, + 588 + ], + "type": "text", + "content": "[65] Johanna Wald, Armen Avetisyan, Nassir Navab, Federico Tombari, and Matthias Nießner. Rio: 3d object instance re-localization in changing indoor environments. In ICCV, 2019. 6, 15" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 590, + 294, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 590, + 294, + 623 + ], + "spans": [ + { + "bbox": [ + 56, + 590, + 294, + 623 + ], + "type": "text", + "content": "[66] Tai Wang, Xinge Zhu, Jiangmiao Pang, and Dahua Lin. Fcos3d: Fully convolutional one-stage monocular 3d object detection. In ICCV, 2021. 3, 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 624, + 294, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 624, + 294, + 677 + ], + "spans": [ + { + "bbox": [ + 56, + 624, + 294, + 677 + ], + "type": "text", + "content": "[67] Tai Wang, Xiaohan Mao, Chenming Zhu, Runsen Xu, Ruiyuan Lyu, Peisen Li, Xiao Chen, Wenwei Zhang, Kai Chen, Tianfan Xue, et al. Embodiedscan: A holistic multimodal 3d perception suite towards embodied ai. In CVPR, 2024. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 680, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 680, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 680, + 294, + 713 + ], + "type": "text", + "content": "[68] Zhenyu Wang, Ya-Li Li, Xi Chen, Hengshuang Zhao, and Shengjin Wang. Uni3detr: Unified 3d detection transformer. In NeurIPS, 2023. 
2, 7" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 316, + 72, + 553, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 116 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 116 + ], + "type": "text", + "content": "[69] Zhenyu Wang, Yali Li, Taichi Liu, Hengshuang Zhao, and Shengjin Wang. Ov-uni3detr: Towards unified open-vocabulary 3d object detection via cycle-modality propagation. In ECCV, 2024. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 118, + 553, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 118, + 553, + 173 + ], + "spans": [ + { + "bbox": [ + 316, + 118, + 553, + 173 + ], + "type": "text", + "content": "[70] Benjamin Wilson, William Qi, Tanmay Agarwal, John Lambert, Jagjeet Singh, Siddhesh Khandelwal, Bowen Pan, Ratnesh Kumar, Andrew Hartnett, Jhony Kaesemodel Pontes, et al. Argoverse 2: Next generation datasets for self-driving perception and forecasting. In NeurIPS Datasets, 2023. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 175, + 553, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 553, + 217 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 553, + 217 + ], + "type": "text", + "content": "[71] Guorun Yang, Xiao Song, Chaoqin Huang, Zhidong Deng, Jianping Shi, and Bolei Zhou. Drivingstereo: A large-scale dataset for stereo matching in autonomous driving scenarios. In CVPR, 2019. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 219, + 553, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 219, + 553, + 251 + ], + "spans": [ + { + "bbox": [ + 316, + 219, + 553, + 251 + ], + "type": "text", + "content": "[72] Jie Yang, Bingliang Li, Ailing Zeng, Lei Zhang, and Ruimao Zhang. 
Open-world human-object interaction detection via multi-modal prompts. In CVPR, 2024. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 253, + 553, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 253, + 553, + 285 + ], + "spans": [ + { + "bbox": [ + 316, + 253, + 553, + 285 + ], + "type": "text", + "content": "[73] Xiuyu Yang, Yunze Man, Junkun Chen, and Yu-Xiong Wang. Scenecraft: Layout-guided 3d scene generation. In NeurIPS, 2025. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 286, + 553, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 286, + 553, + 330 + ], + "spans": [ + { + "bbox": [ + 316, + 286, + 553, + 330 + ], + "type": "text", + "content": "[74] Jin Yao, Hao Gu, Xuweiyi Chen, Jiayun Wang, and Zezhou Cheng. Open vocabulary monocular 3d object detection. arXiv preprint arXiv:2411.16833, 2024. 2, 3, 5, 6, 7, 13, 15" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 331, + 553, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 331, + 553, + 376 + ], + "spans": [ + { + "bbox": [ + 316, + 331, + 553, + 376 + ], + "type": "text", + "content": "[75] Kaixin Yao, Longwen Zhang, Xinhao Yan, Yan Zeng, Qixuan Zhang, Lan Xu, Wei Yang, Jiayuan Gu, and Jingyi Yu. Cast: Component-aligned 3d scene reconstruction from anrgb image. arXiv preprint arXiv:2502.12894, 2025. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 376, + 553, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 376, + 553, + 420 + ], + "spans": [ + { + "bbox": [ + 316, + 376, + 553, + 420 + ], + "type": "text", + "content": "[76] Wei Yin, Chi Zhang, Hao Chen, Zhipeng Cai, Gang Yu, Kaixuan Wang, Xiaozhi Chen, and Chunhua Shen. Metric3d: Towards zero-shot metric 3d prediction from a single image. In ICCV, 2023. 
2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 422, + 553, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 422, + 553, + 454 + ], + "spans": [ + { + "bbox": [ + 316, + 422, + 553, + 454 + ], + "type": "text", + "content": "[77] Amir R Zamir, Alexander Sax, William Shen, Leonidas J Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In CVPR, 2018. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 456, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 456, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 316, + 456, + 553, + 498 + ], + "type": "text", + "content": "[78] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In CVPR, 2022. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 500, + 553, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 500, + 553, + 543 + ], + "spans": [ + { + "bbox": [ + 316, + 500, + 553, + 543 + ], + "type": "text", + "content": "[79] Renrui Zhang, Zhengkai Jiang, Ziyu Guo, Shilin Yan, Junting Pan, Hao Dong, Peng Gao, and Hongsheng Li. Personalize segment anything model with one shot. *ICLR*, 2023. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 545, + 553, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 545, + 553, + 589 + ], + "spans": [ + { + "bbox": [ + 316, + 545, + 553, + 589 + ], + "type": "text", + "content": "[80] Renrui Zhang, Han Qiu, Tai Wang, Ziyu Guo, Ziteng Cui, Yu Qiao, Hongsheng Li, and Peng Gao. Monodetr: Depth-guided transformer for monocular 3d object detection. In ICCV, 2023. 
3, 5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 590, + 553, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 590, + 553, + 633 + ], + "spans": [ + { + "bbox": [ + 316, + 590, + 553, + 633 + ], + "type": "text", + "content": "[81] Renrui Zhang, Jiaming Han, Chris Liu, Aojun Zhou, Pan Lu, Yu Qiao, Hongsheng Li, and Peng Gao. Llama-adapter: Efficient fine-tuning of large language models with zero-initialized attention. In ICLR, 2024. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 635, + 553, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 635, + 553, + 689 + ], + "spans": [ + { + "bbox": [ + 316, + 635, + 553, + 689 + ], + "type": "text", + "content": "[82] Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Ziyu Guo, Shicheng Li, Yichi Zhang, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, et al. Mavis: Mathematical visual instruction tuning with an automatic data engine. arXiv preprint arXiv:2407.08739, 2024. 
3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "text", + "content": "[83] Haoyi Zhu, Honghui Yang, Xiaoyang Wu, Di Huang, Sha Zhang, Xianglong He, Hengshuang Zhao, Chunhua Shen, Yu" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 239 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 75, + 72, + 294, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 72, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 75, + 72, + 294, + 106 + ], + "type": "text", + "content": "Qiao, Tong He, et al. Ponderv2: Pave the way for 3d foundation model with a universal pre-training paradigm. arXiv preprint arXiv:2310.08586, 2023. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 107, + 295, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 107, + 295, + 150 + ], + "spans": [ + { + "bbox": [ + 56, + 107, + 295, + 150 + ], + "type": "text", + "content": "[84] Menglong Zhu, Konstantinos G Derpanis, Yinfei Yang, Samarth Brahmbhatt, Mabel Zhang, Cody Phillips, Matthieu Lecce, and Kostas Daniilidis. Single image 3d object detection and pose estimation for grasping. In ICRA, 2014. 
2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 152, + 294, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 294, + 195 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 294, + 195 + ], + "type": "text", + "content": "[85] Ziyu Zhu, Zhuofan Zhang, Xiaojian Ma, Xuesong Niu, Yixin Chen, Baoxiong Jia, Zhidong Deng, Siyuan Huang, and Qing Li. Unifying 3d vision-language understanding via promptable queries. In ECCV, 2024. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 197, + 294, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 197, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 197, + 294, + 239 + ], + "type": "text", + "content": "[86] Yiming Zuo, Karhan Kayan, Maggie Wang, Kevin Jeon, Jia Deng, and Thomas L Griffiths. Towards foundation models for 3d vision: How close are we? arXiv preprint arXiv:2410.10799, 2024. 2" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 207, + 68, + 403, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 68, + 403, + 110 + ], + "spans": [ + { + "bbox": [ + 207, + 68, + 403, + 110 + ], + "type": "text", + "content": "Detect Anything 3D in the Wild Supplementary Material" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 123, + 102, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 123, + 102, + 135 + ], + "spans": [ + { + "bbox": [ + 55, + 123, + 102, + 135 + ], + "type": "text", + "content": "6. 
DA3D" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 145, + 296, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 145, + 296, + 253 + ], + "spans": [ + { + "bbox": [ + 55, + 145, + 296, + 253 + ], + "type": "text", + "content": "DA3D is a unified 3D detection dataset, consists of 16 diverse datasets. It builds upon six datasets in Omni3D—Hypersim [57], ARKitScenes [4], Objectron [2], SUNRGBD [61], KITTI [24], and nuScenes [9]—while partially incorporating an additional 10 datasets to further enhance the scale, diversity, and generalization capabilities of 3D detection models. As shown in Figure 5, DA3D comprises 0.4 million frames (" + }, + { + "bbox": [ + 55, + 145, + 296, + 253 + ], + "type": "inline_equation", + "content": "2.5 \\times" + }, + { + "bbox": [ + 55, + 145, + 296, + 253 + ], + "type": "text", + "content": " the scale of Omni3D), spanning 20 distinct camera configurations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 255, + 296, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 255, + 296, + 327 + ], + "spans": [ + { + "bbox": [ + 55, + 255, + 296, + 327 + ], + "type": "text", + "content": "The dataset is standardized with the similar structure to Omni3D [8], including monocular RGB images, camera intrinsics, 3D bounding boxes, and depth maps. DA3D is designed to test 3D detection models across a wide variety of environments, camera configurations, and object categories, offering a more comprehensive evaluation setting." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 341, + 176, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 341, + 176, + 354 + ], + "spans": [ + { + "bbox": [ + 55, + 341, + 176, + 354 + ], + "type": "text", + "content": "6.1. 
Dataset Composition" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 360, + 292, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 360, + 292, + 372 + ], + "spans": [ + { + "bbox": [ + 55, + 360, + 292, + 372 + ], + "type": "text", + "content": "We categorize the datasets in DA3D based on two aspects:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 373, + 296, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 373, + 296, + 445 + ], + "spans": [ + { + "bbox": [ + 55, + 373, + 296, + 445 + ], + "type": "text", + "content": "Indoor vs. Outdoor. As shown in Figure 6 (left), DA3D expands both indoor and outdoor datasets compared to Omni3D. Additionally, the ratio of indoor to outdoor data in DA3D is more balanced than in Omni3D, ensuring a more representative distribution for models trained across diverse environments." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 448, + 295, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 448, + 295, + 472 + ], + "spans": [ + { + "bbox": [ + 55, + 448, + 295, + 472 + ], + "type": "text", + "content": "Supervision Types. We also analyze DA3D in terms of the distribution of supervision types (See Figure 6 (right)):" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 474, + 294, + 533 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 474, + 237, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 474, + 237, + 485 + ], + "spans": [ + { + "bbox": [ + 56, + 474, + 237, + 485 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 56, + 474, + 237, + 485 + ], + "type": "inline_equation", + "content": "35\\%" + }, + { + "bbox": [ + 56, + 474, + 237, + 485 + ], + "type": "text", + "content": " data provides only depth supervision." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 486, + 280, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 486, + 280, + 497 + ], + "spans": [ + { + "bbox": [ + 56, + 486, + 280, + 497 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 56, + 486, + 280, + 497 + ], + "type": "inline_equation", + "content": "23\\%" + }, + { + "bbox": [ + 56, + 486, + 280, + 497 + ], + "type": "text", + "content": " data provide only 3D bounding box annotations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 498, + 294, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 498, + 294, + 519 + ], + "spans": [ + { + "bbox": [ + 56, + 498, + 294, + 519 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 56, + 498, + 294, + 519 + ], + "type": "inline_equation", + "content": "42\\%" + }, + { + "bbox": [ + 56, + 498, + 294, + 519 + ], + "type": "text", + "content": " data contains both depth maps and 3D bounding boxes." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 521, + 247, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 521, + 247, + 533 + ], + "spans": [ + { + "bbox": [ + 56, + 521, + 247, + 533 + ], + "type": "text", + "content": "- Intrinsic parameters are available for all data." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 546, + 145, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 546, + 145, + 559 + ], + "spans": [ + { + "bbox": [ + 55, + 546, + 145, + 559 + ], + "type": "text", + "content": "6.2. Dataset Splits." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 566, + 295, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 566, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 55, + 566, + 295, + 590 + ], + "type": "text", + "content": "For training and evaluation, we follow the dataset splitting strategy used in prior works [8]. Specifically:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 592, + 295, + 674 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 56, + 592, + 295, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 592, + 295, + 615 + ], + "spans": [ + { + "bbox": [ + 56, + 592, + 295, + 615 + ], + "type": "text", + "content": "- We construct the training set by merging training subsets from the original datasets." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 616, + 295, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 616, + 295, + 639 + ], + "spans": [ + { + "bbox": [ + 56, + 616, + 295, + 639 + ], + "type": "text", + "content": "- We form the validation set by sampling from the original training data, ensuring balanced representation." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 639, + 295, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 639, + 295, + 674 + ], + "spans": [ + { + "bbox": [ + 56, + 639, + 295, + 674 + ], + "type": "text", + "content": "- We use the original validation sets of each dataset as the test set, allowing for direct comparison with previous benchmarks." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": "This setup ensures fair evaluation and maintains consistency with existing benchmarks while assessing both indomain and zero-shot generalization capabilities." + } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 319, + 126, + 550, + 289 + ], + "blocks": [ + { + "bbox": [ + 319, + 126, + 550, + 289 + ], + "lines": [ + { + "bbox": [ + 319, + 126, + 550, + 289 + ], + "spans": [ + { + "bbox": [ + 319, + 126, + 550, + 289 + ], + "type": "image", + "image_path": "030cd2830111b994f5772d412ec9b32d8c117feabb525c49f2d8dc8a61fd4064.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 300, + 523, + 312 + ], + "lines": [ + { + "bbox": [ + 345, + 300, + 523, + 312 + ], + "spans": [ + { + "bbox": [ + 345, + 300, + 523, + 312 + ], + "type": "text", + "content": "Figure 5. The composition of the DA3D dataset." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 321, + 341, + 545, + 432 + ], + "blocks": [ + { + "bbox": [ + 321, + 341, + 545, + 432 + ], + "lines": [ + { + "bbox": [ + 321, + 341, + 545, + 432 + ], + "spans": [ + { + "bbox": [ + 321, + 341, + 545, + 432 + ], + "type": "image", + "image_path": "f51fa1c38c8264dda2a24ac091ec28f700980b6116fcae49b6d6257c519fa399.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 457, + 555, + 491 + ], + "lines": [ + { + "bbox": [ + 313, + 457, + 555, + 491 + ], + "spans": [ + { + "bbox": [ + 313, + 457, + 555, + 491 + ], + "type": "text", + "content": "Figure 6. The data distribution of the DA3D dataset. 
(left): the statistics of indoor and outdoor data. (right): the statistics of data with different supervision categories." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 512, + 419, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 512, + 419, + 525 + ], + "spans": [ + { + "bbox": [ + 314, + 512, + 419, + 525 + ], + "type": "text", + "content": "6.3. Evaluation Setup" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 530, + 554, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 530, + 554, + 566 + ], + "spans": [ + { + "bbox": [ + 313, + 530, + 554, + 566 + ], + "type": "text", + "content": "DA3D is designed to evaluate zero-shot generalization in both novel object categories and novel camera configurations. We define two evaluation settings:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 567, + 554, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 567, + 554, + 602 + ], + "spans": [ + { + "bbox": [ + 313, + 567, + 554, + 602 + ], + "type": "text", + "content": "Zero-Shot Categories. Following prior work [74], we select partial categories from KITTI, SUNRGBD, and ARKitScenes as unseen classes for zero-shot testing." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 604, + 402, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 604, + 402, + 613 + ], + "spans": [ + { + "bbox": [ + 314, + 604, + 402, + 613 + ], + "type": "text", + "content": "Zero-Shot Datasets." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 616, + 553, + 712 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 314, + 616, + 553, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 616, + 553, + 639 + ], + "spans": [ + { + "bbox": [ + 314, + 616, + 553, + 639 + ], + "type": "text", + "content": "- We use Cityscapes3D, Waymo, and 3RScan as unseen datasets with novel camera configurations." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 639, + 553, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 639, + 553, + 674 + ], + "spans": [ + { + "bbox": [ + 314, + 639, + 553, + 674 + ], + "type": "text", + "content": "- Cityscapes3D & Waymo introduce new intrinsics and image styles, challenging models to generalize across different camera setups." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 675, + 553, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 675, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 314, + 675, + 553, + 712 + ], + "type": "text", + "content": "- 3RScan not only introduces novel camera setups, but also contains unseen object categories, making it useful for testing both category and camera generalization." 
+ } + ] + } + ], + "index": 30 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 95, + 294, + 205 + ], + "blocks": [ + { + "bbox": [ + 56, + 95, + 294, + 205 + ], + "lines": [ + { + "bbox": [ + 56, + 95, + 294, + 205 + ], + "spans": [ + { + "bbox": [ + 56, + 95, + 294, + 205 + ], + "type": "image", + "image_path": "21e1019616c40f2263d401db5f334715e72f37dfeb65f72de813273f4361ab13.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 238, + 295, + 261 + ], + "lines": [ + { + "bbox": [ + 55, + 238, + 295, + 261 + ], + "spans": [ + { + "bbox": [ + 55, + 238, + 295, + 261 + ], + "type": "text", + "content": "Figure 7. Detailed implementation of camera and depth module from UniDepth." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 280, + 141, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 280, + 141, + 293 + ], + "spans": [ + { + "bbox": [ + 55, + 280, + 141, + 293 + ], + "type": "text", + "content": "7. Model Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 300, + 241, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 300, + 241, + 313 + ], + "spans": [ + { + "bbox": [ + 55, + 300, + 241, + 313 + ], + "type": "text", + "content": "7.1. 
Camera and Depth Module Details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 317, + 295, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 317, + 295, + 352 + ], + "spans": [ + { + "bbox": [ + 55, + 317, + 295, + 352 + ], + "type": "text", + "content": "This section introduces how the camera module and depth module work, predicting intrinsic and camera-aware depth, also related feature." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 353, + 296, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 353, + 296, + 413 + ], + "spans": [ + { + "bbox": [ + 55, + 353, + 296, + 413 + ], + "type": "text", + "content": "As show in Figure 7, the fused feature " + }, + { + "bbox": [ + 55, + 353, + 296, + 413 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{F}}_{\\mathrm{fused}}" + }, + { + "bbox": [ + 55, + 353, + 296, + 413 + ], + "type": "text", + "content": " are input into the camera module, which uses a cross-attention mechanism and a to obtain the camera intrinsic parameters. These intrinsic parameters are then used to generate camera rays. 
The rays are defined as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 125, + 421, + 225, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 421, + 225, + 460 + ], + "spans": [ + { + "bbox": [ + 125, + 421, + 225, + 460 + ], + "type": "interline_equation", + "content": "(r _ {1}, r _ {2}, r _ {3}) = \\mathbf {K} ^ {- 1} \\left[ \\begin{array}{l} u \\\\ v \\\\ 1 \\end{array} \\right]", + "image_path": "be40af5af1549e01c3f5d286d5e1cd6903893f23a28ff81ba2b5b61acae676a5.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "spans": [ + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "inline_equation", + "content": "\\mathbf{K}" + }, + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "text", + "content": " is the calibration matrix, " + }, + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "text", + "content": " are the pixel coordinates, and 1 is a vector of ones. 
In this context, the homogeneous camera rays " + }, + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "inline_equation", + "content": "(r_x,r_y)" + }, + { + "bbox": [ + 55, + 464, + 296, + 502 + ], + "type": "text", + "content": " are derived from:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 154, + 508, + 197, + 535 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 508, + 197, + 535 + ], + "spans": [ + { + "bbox": [ + 154, + 508, + 197, + 535 + ], + "type": "interline_equation", + "content": "\\left( \\begin{array}{c} r _ {1} \\\\ \\hline r _ {3} \\end{array} , \\frac {r _ {2}}{r _ {3}}\\right)", + "image_path": "bba475ca3757c24fd87f39ff5c56d0ed96a190907ce290761b5951994746dba0.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 539, + 295, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 539, + 295, + 586 + ], + "spans": [ + { + "bbox": [ + 55, + 539, + 295, + 586 + ], + "type": "text", + "content": "This dense representation of the camera rays undergoes Laplace Spherical Harmonic Encoding (SHE) [54] to produce the embeddings " + }, + { + "bbox": [ + 55, + 539, + 295, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 55, + 539, + 295, + 586 + ], + "type": "text", + "content": ". These embeddings are then passed to the depth module using the cross-attention mechanism." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 586, + 295, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 586, + 295, + 611 + ], + "spans": [ + { + "bbox": [ + 55, + 586, + 295, + 611 + ], + "type": "text", + "content": "The depth feature conditioned on the camera embeddings, is computed as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 102, + 622, + 247, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 622, + 247, + 635 + ], + "spans": [ + { + "bbox": [ + 102, + 622, + 247, + 635 + ], + "type": "interline_equation", + "content": "\\mathbf {D} \\mid \\mathbf {C} = \\operatorname {M L P} (\\operatorname {C r o s s A t t n} (\\mathbf {D}, \\mathbf {C}))", + "image_path": "b252238fbbe5d98f1485db49a7d87d21f121366de113853d5529d66fa624f8cf.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 640, + 295, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 640, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 55, + 640, + 295, + 665 + ], + "type": "text", + "content": "Subsequently, the depth feature is processed through an upsampling head to predict the final depth map." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 672, + 176, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 672, + 176, + 684 + ], + "spans": [ + { + "bbox": [ + 55, + 672, + 176, + 684 + ], + "type": "text", + "content": "7.2.3D Box Head Details" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "This section introduces the details of the 3D box head. 
After the query " + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": " passes through the Geometric Transformer" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 335, + 74, + 536, + 203 + ], + "blocks": [ + { + "bbox": [ + 335, + 74, + 536, + 203 + ], + "lines": [ + { + "bbox": [ + 335, + 74, + 536, + 203 + ], + "spans": [ + { + "bbox": [ + 335, + 74, + 536, + 203 + ], + "type": "image", + "image_path": "8830df15b8218b952f02e4a337c426522f4241f7686a07e520d9210153f77c60.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 376, + 214, + 490, + 226 + ], + "lines": [ + { + "bbox": [ + 376, + 214, + 490, + 226 + ], + "spans": [ + { + "bbox": [ + 376, + 214, + 490, + 226 + ], + "type": "text", + "content": "Figure 8. 3D Box head details." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "spans": [ + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "text", + "content": "and Two-Way Transformer, the model outputs " + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\mathbf{O}" + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\mathbf{O}" + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "text", + "content": " contains outputs corresponding to both 3D-related hidden states " + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{3D}" + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "text", + "content": " and prompt hidden states " + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_p" + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "text", + "content": ". We extract the 3D-related output " + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{3D}" + }, + { + "bbox": [ + 313, + 247, + 553, + 295 + ], + "type": "text", + "content": " for further processing." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 295, + 553, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 295, + 553, + 319 + ], + "spans": [ + { + "bbox": [ + 313, + 295, + 553, + 319 + ], + "type": "text", + "content": "Subsequently, " + }, + { + "bbox": [ + 313, + 295, + 553, + 319 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 295, + 553, + 319 + ], + "type": "text", + "content": " is passed through a series of prediction heads as shown in Figure 8." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "text", + "content": "We then transform these predictions into the final 3D bounding box parameters and obtain the 3D bounding box " + }, + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "inline_equation", + "content": "(x,y,z,w,h,l,R,S)" + }, + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "text", + "content": " for each detected object, where " + }, + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "inline_equation", + "content": "(x,y,z)" + }, + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "text", + "content": " denotes the 3D center, " + }, + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "inline_equation", + "content": "(w,h,l)" + }, + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "text", + "content": " represent the dimensions, and " + }, + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "inline_equation", + "content": "(R,S)" + }, + { + "bbox": [ + 313, + 319, + 554, + 390 + ], + "type": "text", + "content": " describe the rotation and predicted 3D IoU score." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 399, + 394, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 399, + 394, + 411 + ], + "spans": [ + { + "bbox": [ + 314, + 399, + 394, + 411 + ], + "type": "text", + "content": "7.3. Loss Details" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 417, + 553, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 417, + 553, + 441 + ], + "spans": [ + { + "bbox": [ + 313, + 417, + 553, + 441 + ], + "type": "text", + "content": "Depth Loss. 
The depth module is supervised using the Scale-Invariant Logarithmic (SILog) loss, defined as:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 323, + 450, + 553, + 488 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 450, + 553, + 488 + ], + "spans": [ + { + "bbox": [ + 323, + 450, + 553, + 488 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {d e p t h}} = \\sqrt {\\frac {1}{N} \\sum_ {i = 1} ^ {N} \\Delta d _ {i} ^ {2} - 0 . 1 5 \\cdot \\left(\\frac {1}{N} \\sum_ {i = 1} ^ {N} \\Delta d _ {i}\\right) ^ {2}} \\tag {10}", + "image_path": "fdd5205f1d548383d657c326d25f7fd8a2d94096644c9594ddfb5be630e84aeb.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 498, + 553, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 553, + 524 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 553, + 524 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 498, + 553, + 524 + ], + "type": "inline_equation", + "content": "\\Delta d_{i} = \\log (d_{i}^{\\mathrm{pred}}) - \\log (d_{i}^{\\mathrm{gt}})" + }, + { + "bbox": [ + 313, + 498, + 553, + 524 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 498, + 553, + 524 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 498, + 553, + 524 + ], + "type": "text", + "content": " is the number of valid depth pixels." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 524, + 553, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 524, + 553, + 559 + ], + "spans": [ + { + "bbox": [ + 313, + 524, + 553, + 559 + ], + "type": "text", + "content": "Camera Intrinsic Loss. The camera error is computed with the dense camera rays. 
For an image with height " + }, + { + "bbox": [ + 313, + 524, + 553, + 559 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 313, + 524, + 553, + 559 + ], + "type": "text", + "content": " and width " + }, + { + "bbox": [ + 313, + 524, + 553, + 559 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 313, + 524, + 553, + 559 + ], + "type": "text", + "content": ", the intrinsic loss is formulated as:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 579, + 553, + 618 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 579, + 553, + 618 + ], + "spans": [ + { + "bbox": [ + 320, + 579, + 553, + 618 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {c a m}} = \\sqrt {\\frac {1}{H W} \\sum_ {i = 1} ^ {H W} \\Delta r _ {i} ^ {2} - 1 \\cdot \\left(\\frac {1}{H W} \\sum_ {i = 1} ^ {H W} \\Delta r _ {i}\\right) ^ {2}} \\tag {11}", + "image_path": "1d30edbfc705f12c2dc603f39cff1ba091a66207250b07d910e9dea9bd132094.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 628, + 416, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 628, + 416, + 641 + ], + "spans": [ + { + "bbox": [ + 313, + 628, + 416, + 641 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 628, + 416, + 641 + ], + "type": "inline_equation", + "content": "\\Delta r_{i} = r_{i}^{\\mathrm{pred}} - r_{i}^{\\mathrm{gt}}" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 313, + 642, + 553, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 553, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 553, + 665 + ], + "type": "text", + "content": "Detection Loss. 
The detection loss consists of three components:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 666, + 553, + 712 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 314, + 666, + 553, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 666, + 553, + 689 + ], + "spans": [ + { + "bbox": [ + 314, + 666, + 553, + 689 + ], + "type": "text", + "content": "- Smooth L1 loss for box regression, covering the prediction of center, depth, and dimensions." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 689, + 553, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 689, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 314, + 689, + 553, + 712 + ], + "type": "text", + "content": "- Chamfer loss for rotation matrix prediction, ensuring accurate orientation estimation." + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 70, + 276, + 175 + ], + "blocks": [ + { + "bbox": [ + 76, + 70, + 276, + 175 + ], + "lines": [ + { + "bbox": [ + 76, + 70, + 276, + 175 + ], + "spans": [ + { + "bbox": [ + 76, + 70, + 276, + 175 + ], + "type": "image", + "image_path": "eee539fd5445d9fba45f510b6fe2a76ffb3eb259a829c7561442ec74d15534e6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 184, + 296, + 251 + ], + "lines": [ + { + "bbox": [ + 55, + 184, + 296, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 184, + 296, + 251 + ], + "type": "text", + "content": "Figure 9. An example on 3RScan. 
The left image shows the original 3RScan annotations, while the right image presents the detection results from Grounding DINO after feeding in all the 3RScan labels. Severe naming ambiguities (e.g., \"trash can\" vs. \"rubbish bin\") and missing annotations lead to a substantial decrease in the detector's performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 270, + 295, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 270, + 295, + 305 + ], + "spans": [ + { + "bbox": [ + 55, + 270, + 295, + 305 + ], + "type": "text", + "content": "- Mean squared error (MSE) loss for 3D IoU score prediction, which optimizes the confidence estimates of detected objects." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 306, + 269, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 306, + 269, + 318 + ], + "spans": [ + { + "bbox": [ + 67, + 306, + 269, + 318 + ], + "type": "text", + "content": "Combining these terms, the total detection loss is:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 326, + 294, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 326, + 294, + 338 + ], + "spans": [ + { + "bbox": [ + 121, + 326, + 294, + 338 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {d e t}} = \\mathcal {L} _ {\\mathrm {b o x}} + \\mathcal {L} _ {\\mathrm {r o t}} + \\mathcal {L} _ {\\mathrm {i o u}}, \\tag {12}", + "image_path": "f86f1c5d4087da4c6e75041cb573925b9e5c37ff133e7b388423242a420533e5.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 347, + 182, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 347, + 182, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 347, + 182, + 361 + ], + "type": "text", + "content": "8. 
Target-aware Metrics" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 367, + 295, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 367, + 295, + 450 + ], + "spans": [ + { + "bbox": [ + 55, + 367, + 295, + 450 + ], + "type": "text", + "content": "In our work, we evaluate both traditional metrics and the target-aware metrics proposed by OVMono3D [74]. Under the target-aware paradigm, rather than prompting the model with all possible classes from an entire dataset, we only prompt it with the classes present in the current image during inference. This is designed to address two key challenges encountered:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 451, + 295, + 522 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 55, + 451, + 295, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 451, + 295, + 486 + ], + "spans": [ + { + "bbox": [ + 55, + 451, + 295, + 486 + ], + "type": "text", + "content": "- Missing annotations: Comprehensive 3D annotation is often impractical or prohibitively expensive, leading to incomplete ground-truth annotations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 487, + 295, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 487, + 295, + 522 + ], + "spans": [ + { + "bbox": [ + 55, + 487, + 295, + 522 + ], + "type": "text", + "content": "- Naming ambiguity: Datasets may label the same objects with inconsistent category names or annotation policies, creating confusion when merging datasets." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 523, + 295, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 523, + 295, + 630 + ], + "spans": [ + { + "bbox": [ + 55, + 523, + 295, + 630 + ], + "type": "text", + "content": "As illustrated in Figure 9, these issues are especially pronounced in the 3RScan [65] dataset. 
The left side shows the official 3RScan annotations, while the right side shows detections from Grounding DINO, which are largely misaligned with the dataset's labeling conventions. Consequently, traditional evaluation metrics may yield misleading or inconsistent results, whereas target-aware metrics help mitigate these mismatches by restricting the evaluated classes to those actually present in the scene." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 640, + 178, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 640, + 178, + 654 + ], + "spans": [ + { + "bbox": [ + 55, + 640, + 178, + 654 + ], + "type": "text", + "content": "9. More Ablation Study" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 660, + 220, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 660, + 220, + 673 + ], + "spans": [ + { + "bbox": [ + 55, + 660, + 220, + 673 + ], + "type": "text", + "content": "9.1. Various Prompts Performance" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "content": "In this section, we evaluate different types of prompts, including box prompts, point prompts, and text prompts, both with and without intrinsic prompts. The results on Omni3D" + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 337, + 91, + 533, + 137 + ], + "blocks": [ + { + "bbox": [ + 364, + 71, + 504, + 82 + ], + "lines": [ + { + "bbox": [ + 364, + 71, + 504, + 82 + ], + "spans": [ + { + "bbox": [ + 364, + 71, + 504, + 82 + ], + "type": "text", + "content": "Table 4. Various Prompt Performance." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 337, + 91, + 533, + 137 + ], + "lines": [ + { + "bbox": [ + 337, + 91, + 533, + 137 + ], + "spans": [ + { + "bbox": [ + 337, + 91, + 533, + 137 + ], + "type": "table", + "html": "
Prompt TypeBoxPointText
w/ Intrinsic Prompt34.3825.1922.31
w/o Intrinsic Prompt32.1624.021.02
", + "image_path": "2064a3cef29d98631876b710c36133004ed5bd90a00421ac59b9bd0bff893d21.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 356, + 213, + 511, + 262 + ], + "blocks": [ + { + "bbox": [ + 313, + 148, + 555, + 204 + ], + "lines": [ + { + "bbox": [ + 313, + 148, + 555, + 204 + ], + "spans": [ + { + "bbox": [ + 313, + 148, + 555, + 204 + ], + "type": "text", + "content": "Table 5. Ablation on different backbones. The table reports " + }, + { + "bbox": [ + 313, + 148, + 555, + 204 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 148, + 555, + 204 + ], + "type": "text", + "content": " scores. We verify the effectiveness of SAM and DINO along two dimensions: (1) whether or not we use the pretrained SAM parameters, and (2) whether adopt the pretrained DINO backbone or ConvNeXt for the depth module." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 356, + 213, + 511, + 262 + ], + "lines": [ + { + "bbox": [ + 356, + 213, + 511, + 262 + ], + "spans": [ + { + "bbox": [ + 356, + 213, + 511, + 262 + ], + "type": "table", + "html": "
Backbonew/ SAMw/o SAM
DINO25.8019.12
ConvNeXt23.1118.27
", + "image_path": "97862b908765a136856a0ed4e484ee47780443596a04ca4bb07f91b048c034cf.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 283, + 555, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 283, + 555, + 355 + ], + "spans": [ + { + "bbox": [ + 313, + 283, + 555, + 355 + ], + "type": "text", + "content": "are presented in Table 4. Each prompt type demonstrates its effectiveness in guiding 3D detection. Besides, on the zero-shot datasets, we observe that omitting intrinsic prompts leads to a significant performance drop (even approaching zero), which further highlights the critical role of intrinsic prompts for reliable depth calibration in unseen scenarios." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 363, + 491, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 363, + 491, + 376 + ], + "spans": [ + { + "bbox": [ + 313, + 363, + 491, + 376 + ], + "type": "text", + "content": "9.2. Ablation on Different Backbones" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 381, + 555, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 381, + 555, + 501 + ], + "spans": [ + { + "bbox": [ + 313, + 381, + 555, + 501 + ], + "type": "text", + "content": "In this section, we investigate our choice of backbone by comparing the use of SAM and DINO backbones. For DINO, we replace it with ConvNeXt and adopt the same pretraining method proposed by UniDepth. For SAM, we examine its effect by removing the SAM-pretrained weights and training from scratch. As shown in Table 5, SAM's pretrained parameters prove crucial for boosting performance. Meanwhile, compared to ConvNeXt, DINO offers richer geometric representations, resulting in stronger 3D detection performance." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 510, + 463, + 522 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 463, + 522 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 463, + 522 + ], + "type": "text", + "content": "9.3. Ablation on DA3D Dataset" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 528, + 554, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 528, + 554, + 575 + ], + "spans": [ + { + "bbox": [ + 313, + 528, + 554, + 575 + ], + "type": "text", + "content": "We ablate the impact of the DA3D dataset in Tab. 6. The additional data in DA3D primarily improves generalization to novel cameras, as Omni3D contains only two distinctive intrinsics for outdoor scenes." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 585, + 554, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 585, + 554, + 629 + ], + "spans": [ + { + "bbox": [ + 313, + 585, + 554, + 629 + ], + "type": "text", + "content": "Table 6. Ablation on training datasets. Unless specified, all models are trained on the Omni3D dataset. For the in-domain setting, prompts are provided by Cube R-CNN, while prompts for novel classes and novel datasets are generated by Grounding DINO." + } + ] + } + ], + "index": 23 + }, + { + "type": "table", + "bbox": [ + 316, + 638, + 553, + 698 + ], + "blocks": [ + { + "bbox": [ + 316, + 638, + 553, + 698 + ], + "lines": [ + { + "bbox": [ + 316, + 638, + 553, + 698 + ], + "spans": [ + { + "bbox": [ + 316, + 638, + 553, + 698 + ], + "type": "table", + "html": "
MethodIn-domain\nAPommi3d\n3DNovel ClassNovel Camera
APkit\n3DAPsun\n3DAPcity\n3DAP3rs\n3D
Cube R-CNN23.26--8.22 / --
OVMono3D22.984.71 / 4.714.07 / 16.785.88 / 10.980.37 / 8.48
DetAny3D24.3323.75 / 23.757.63 / 20.878.31 / 11.680.64 / 9.56
DetAny3DDA3D24.9225.73 / 25.737.63 / 21.0711.05 / 15.710.65 / 9.58
", + "image_path": "7e29ff8cb32bc9588a5c6dba8caf4d5feeecdd9ecd9443329a0af0fad606e977.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "table_body" + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 211, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 211, + 85 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 211, + 85 + ], + "type": "text", + "content": "9.4. Ablation on Inference Speed" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 89, + 296, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 89, + 296, + 173 + ], + "spans": [ + { + "bbox": [ + 55, + 89, + 296, + 173 + ], + "type": "text", + "content": "We compare the inference speed of DetAny3D with prior methods in Table 7. DetAny3D runs at 1.5 FPS on a single KITTI image, which is slower than Cube R-CNN (33.3 FPS) and OVMono3D (7.1 FPS). This is a trade-off for stronger generalization across novel categories and cameras, as DetAny3D is designed as a foundation model rather than for real-time deployment." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 61, + 203, + 291, + 242 + ], + "blocks": [ + { + "bbox": [ + 88, + 183, + 262, + 194 + ], + "lines": [ + { + "bbox": [ + 88, + 183, + 262, + 194 + ], + "spans": [ + { + "bbox": [ + 88, + 183, + 262, + 194 + ], + "type": "text", + "content": "Table 7. Inference speed comparison on KITTI." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 203, + 291, + 242 + ], + "lines": [ + { + "bbox": [ + 61, + 203, + 291, + 242 + ], + "spans": [ + { + "bbox": [ + 61, + 203, + 291, + 242 + ], + "type": "table", + "html": "
MethodCube R-CNNOVMono3DDetAny3D
FPS ↑33.37.11.5
", + "image_path": "3d4fc0898db5ec9fbe8d3c6ce360302596584579310f43e349d85be44983da8a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 260, + 282, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 260, + 282, + 274 + ], + "spans": [ + { + "bbox": [ + 55, + 260, + 282, + 274 + ], + "type": "text", + "content": "9.5. Per-category Performance on Novel Classes" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 277, + 296, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 277, + 296, + 338 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 296, + 338 + ], + "type": "text", + "content": "As shown in Table 8, we provide a detailed comparison of per-category " + }, + { + "bbox": [ + 55, + 277, + 296, + 338 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 55, + 277, + 296, + 338 + ], + "type": "text", + "content": " on novel classes from the KITTI, SUNRGBD, and ARKitScenes datasets between our DetAny3D and the baseline OVMono3D. DetAny3D shows consistent improvements across most categories." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 348, + 136, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 348, + 136, + 361 + ], + "spans": [ + { + "bbox": [ + 56, + 348, + 136, + 361 + ], + "type": "text", + "content": "10. Limitations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 369, + 296, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 369, + 296, + 464 + ], + "spans": [ + { + "bbox": [ + 55, + 369, + 296, + 464 + ], + "type": "text", + "content": "Text Prompt Process. Our method leverages open-vocabulary 2D detectors such as Grounding DINO to convert text prompts into 2D box prompts. 
While effective, this strategy may cause semantic loss, as textual nuances are not directly injected into the 3D detection pipeline. Moreover, 2D detectors are known to perform poorly under heavy occlusion or partial visibility, introducing a domain gap when transferring their outputs to 3D tasks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 464, + 296, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 464, + 296, + 535 + ], + "spans": [ + { + "bbox": [ + 55, + 464, + 296, + 535 + ], + "type": "text", + "content": "Inference Efficiency. Although DetAny3D achieves strong generalization across novel categories and camera settings, its inference speed (1.5 FPS) is significantly slower than existing lightweight 3D detectors. This limits its applicability in latency-sensitive scenarios such as real-time robotics or autonomous driving." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 536, + 296, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 536, + 296, + 620 + ], + "spans": [ + { + "bbox": [ + 55, + 536, + 296, + 620 + ], + "type": "text", + "content": "Lack of Temporal Modeling. Our current design operates on single-frame inputs and does not utilize temporal information from video sequences. Incorporating motion cues and enforcing temporal consistency could potentially improve detection accuracy and enable better integration into downstream video-based tasks, such as video knowledge distillation and temporal grounding." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 630, + 185, + 644 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 185, + 644 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 185, + 644 + ], + "type": "text", + "content": "11. 
Licenses and Privacy" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 650, + 294, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 650, + 294, + 674 + ], + "spans": [ + { + "bbox": [ + 55, + 650, + 294, + 674 + ], + "type": "text", + "content": "All data used in this work are obtained from publicly available datasets and are subject to their respective licenses." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 335, + 234, + 535, + 579 + ], + "blocks": [ + { + "bbox": [ + 313, + 203, + 553, + 225 + ], + "lines": [ + { + "bbox": [ + 313, + 203, + 553, + 225 + ], + "spans": [ + { + "bbox": [ + 313, + 203, + 553, + 225 + ], + "type": "text", + "content": "Table 8. Per-category target-aware " + }, + { + "bbox": [ + 313, + 203, + 553, + 225 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{3\\mathrm{D}}" + }, + { + "bbox": [ + 313, + 203, + 553, + 225 + ], + "type": "text", + "content": " comparison on novel classes between DetAny3D and OVMono3D." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 335, + 234, + 535, + 579 + ], + "lines": [ + { + "bbox": [ + 335, + 234, + 535, + 579 + ], + "spans": [ + { + "bbox": [ + 335, + 234, + 535, + 579 + ], + "type": "table", + "html": "
CategoryOVMono3DDetAny3D
Board4.836.02
Printer16.2360.22
Painting2.805.11
Microwave30.3157.21
Tray10.116.70
Podium48.3773.65
Cart47.3133.46
Tram4.7127.90
Easy Categories20.5833.79
Monitor9.4415.95
Bag15.6117.69
Dresser29.0841.75
Keyboard9.139.52
Drawers43.0440.80
Computer7.4412.37
Kitchen Pan9.988.70
Potted Plant6.6626.34
Tissues12.4512.95
Rack10.219.04
Toys5.2416.14
Phone3.894.42
Soundsystem13.226.21
Fireplace13.1630.75
Hard Categories13.4718.05
All Categories16.0523.77
", + "image_path": "63c168899add200908ab32369bdd433eff8a0a4b37295ebfefda64321412b0b9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_content_list.json b/data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3c25de122ea656b230e5d29b6500cbee69aa6073 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_content_list.json @@ -0,0 +1,3961 @@ +[ + { + "type": "text", + "text": "VisualCloze: A Universal Image Generation Framework via Visual In-Context Learning", + "text_level": 1, + "bbox": [ + 215, + 130, + 782, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhong-Yu Li $^{1,4*}$ Ruoyi Du $^{2,4*}$ Juncheng Yan $^{3,4}$ Le Zhuo $^{4}$ Qilong Wu $^{4}$", + "bbox": [ + 192, + 202, + 803, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhen Li $^{5\\dagger}$ Peng Gao $^{4}$ Zhanyu Ma $^{2}$ Ming-Ming Cheng $^{1\\dagger}$", + "bbox": [ + 254, + 220, + 745, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ VCIP, CS, Nankai University $^{2}$ Beijing University of Posts and Telecommunications", + "bbox": [ + 156, + 238, + 839, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ Tsinghua University $^{4}$ Shanghai AI Laboratory $^{5}$ The Chinese University of Hong Kong", + "bbox": [ + 135, 
+ 256, + 861, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project page: https://visualcloze.github.io", + "bbox": [ + 320, + 275, + 671, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Understand", + "text_level": 1, + "bbox": [ + 112, + 305, + 184, + 318 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the task", + "bbox": [ + 127, + 319, + 173, + 327 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/84a4b54e00041364199bdc1376b9bbd24b486ed080d34169fc1d3f7c10d42653.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 305, + 241, + 319 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/249c5eba9181e8fcc9090710c206e223e70e8c83341bea0710dcd66874769944.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 171, + 319, + 241, + 356 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In-context examples", + "bbox": [ + 223, + 306, + 357, + 319 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/fb04b2ab9ff17bcfb7fa35d7453094bf8d14956c8fdf0ab406495435650b43b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 246, + 319, + 292, + 356 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4393be7c6f29bc43f54a261a63eaa8c2189646433cf43bedd9b0919cfd199fad.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 300, + 319, + 392, + 356 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Style +", + "bbox": [ + 163, + 356, + 212, + 364 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Subject =", + "bbox": [ + 163, + 364, + 212, + 375 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/75d89f5d8503df369fc39da3705554613610fada9cfa6423518ea4dc32655477.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 364, + 243, + 375 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": 
"images/1698230c67af2bc59f13117325208987cdf1153085e63af2e77e4bcdc8626b1d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 243, + 364, + 334, + 402 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5eb58d3f654a129ad2c34118c2949abb78a7b3313af457599e18176510edc4f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 364, + 362, + 402 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2202c6af3c094ac1789278049ef49938d293faf990575f94e378d7aa01bd8828.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 364, + 364, + 416, + 402 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/aba5d586ab85b4c732671dccc30e8c30f804cf94b08115818770277e80fdc808.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 419, + 310, + 535, + 383 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c9cbc9cc46f803711a2f5087bf774fbaf581c69dbcd8e5093a0df31bdda91ddd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 535, + 311, + 645, + 398 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bd0b98157e40f50c61a930bc7c3484210044f7bb4acfee3766b5f1e2a62ce842.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 656, + 311, + 769, + 383 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7ff91ae5472dbf0e4306622f6e5faf691a809bce8c60153b9d9f3805b3ed45fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 772, + 311, + 883, + 398 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dense Prediction", + "bbox": [ + 442, + 386, + 540, + 395 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Pose Estimation", + "bbox": [ + 676, + 386, + 771, + 395 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fill blank grid", + "bbox": [ + 114, + 440, + 187, + 450 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "by reasoning", + "bbox": [ + 114, + 452, + 189, + 462 + ], 
+ "page_idx": 0 + }, + { + "type": "text", + "text": "Visual Prompt", + "text_level": 1, + "bbox": [ + 194, + 410, + 316, + 422 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f7f813c9d62261dca493ea9cea5ef56da08acae1825f427138e1cb653148b7af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 194, + 422, + 295, + 460 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/092609907fdeb76319a8b135e6864cbfdaf3f44b0f64eee6b2bc9518ecd538c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, + 422, + 344, + 460 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Target", + "bbox": [ + 346, + 410, + 390, + 421 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a79b31b137b8ac5a139b3dc80326a763e395c05cbeaf2c1fbf7f0ab7a6c5642a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 346, + 422, + 370, + 458 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2db8d9e6ba2c8127992b3a5110e95463524927abf523df959adf98d905d9ac09.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 431, + 383, + 448 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Extend to diverse tasks", + "bbox": [ + 245, + 470, + 392, + 483 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4227aefc2e2bdd775eada89a92f8d6dbb11045e2d36aa517d80ff6fac123a2aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 419, + 404, + 534, + 476 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Image Restoration", + "bbox": [ + 434, + 478, + 540, + 488 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/863f9581ed491674421cbc2abe5eca4cd5538d9abd21ab564db8018f6735de92.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 535, + 404, + 645, + 491 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9dcabbfd8c75b0c6cdadcff4e12d7bd5baaef8cd00fb64ea0e774437ac5110f8.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 656, + 404, + 769, + 474 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d9d6747f04ff9c2858bf3fdc29d545b472e6d70bd11ff524bc2478f78a4d043e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 772, + 404, + 883, + 491 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2592260bdbec76c8a122218f8f01c344087274b81819d1f8c0e434b38c0b1774.jpg", + "image_caption": [ + "Subject + Layout + Style" + ], + "image_footnote": [], + "bbox": [ + 129, + 500, + 196, + 565 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/940a7dcba50c975c76e446ee20715be0c4ab77ed609eace49e1313f4b4f2fac5.jpg", + "image_caption": [ + "Out + Style" + ], + "image_footnote": [], + "bbox": [ + 223, + 500, + 330, + 565 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/508cbcf5bd841b99b378aea70c2c4cb285132b5cb2e168a31a3cdf249aa1e100.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 500, + 433, + 580 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/24b1c78dac9ec403cbf8363cdcd5e632ebc391d46e27bcb75148fea5ba6868d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 434, + 500, + 540, + 580 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/93f401a3ae1b820383a40ed9b15bea2bef7e24e3bda244669a1a79af432a5fd0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 568, + 498, + 671, + 565 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Subject-driven", + "bbox": [ + 593, + 566, + 679, + 577 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8b4fc23f069f0c170232eae403f1a21e6a887e53a17c4491126b10f013255ff1.jpg", + "image_caption": [ + "#", + "$\\therefore m : x = 1$ 或 ${3x} + {4y} + 1 = 0$" + ], + "image_footnote": [], + "bbox": [ + 671, + 500, + 779, + 565 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": 
"images/2227fd4b48343bbc2e5a42bcd226b45f9dc129a4c514bc1ae1388c098343dfc2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 779, + 500, + 885, + 565 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c8add268e59b84fb865426faadb73b0ea791f4d3904ae87c861f13ec35a054e5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 585, + 187, + 660 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/aec23ed35e291af8c7df29434c10d984b9fadaf5bc6ff18946ad57d77dc73d24.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 585, + 264, + 661 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/aaf2c1562237f5ddb14c3eed6b9f102fd9b427dd8f8888b74c7e52557e1dd7fe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 277, + 585, + 346, + 645 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2e9ee140d2a4597e7fa85f3386ba1a0e2949ce6941fb40534e77123b7ea6392e.jpg", + "image_caption": [ + "Relighting" + ], + "image_footnote": [], + "bbox": [ + 349, + 585, + 416, + 647 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f9b78ddccec0ccf409f2fc8879fa376edc9d39e8cb08907d81efb9a99c5c6a2c.jpg", + "image_caption": [ + "Virtual Try-On" + ], + "image_footnote": [], + "bbox": [ + 429, + 585, + 504, + 646 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ed5f6d948058c554d40aa00b169db869fe0195040f2a10e59f002c94628c2c0a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 585, + 573, + 648 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9da1aab1b366244e777d1117796b8fce76b9d73519a64f26cd2fc9dbeab6ff3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 573, + 585, + 653, + 648 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c506e86c12a0d7f207718f07d5028554f851e083963db87677d21e69d797c3e4.jpg", + "image_caption": [ + "Style Transfer" + ], + 
"image_footnote": [], + "bbox": [ + 666, + 585, + 735, + 645 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b28958482ff95ad335836742e9a81e9645dc5376540a879f9995da4107d93fc1.jpg", + "image_caption": [ + "ansfer" + ], + "image_footnote": [], + "bbox": [ + 736, + 585, + 810, + 645 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/47b133f962d1943ebccc7d7b95526dd22ba979ddb99f83ec1f369a7fafebc9be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 813, + 585, + 883, + 660 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ee5e5e4dc0f451c0d87afc70fdfb4e5b425f1d04355975da1e325a69e82d76cb.jpg", + "image_caption": [ + "Editing (Add)" + ], + "image_footnote": [], + "bbox": [ + 114, + 667, + 258, + 717 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1dc3854fd5ac3d6dcffbeb4a1d39085a938d5eb202d29657e49a2f6b7d256cda.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 261, + 667, + 395, + 731 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1a72a8d71ec0685fd94de7f2a505d77e8c5208a02955aace9b945bd03d575d65.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 398, + 667, + 517, + 731 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/66ed0ab3c6fbf09c3be50cfee5d84e76b0392f0983e9eb68ce2439a48844dc56.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 667, + 638, + 731 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ef3a55e1c0e0d92f96cdbb5e7bf19885fc97fd1438490113606cf105ff38a722.jpg", + "image_caption": [ + "Subject-driven Editing", + "even Editing" + ], + "image_footnote": [], + "bbox": [ + 638, + 667, + 720, + 718 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ed0aac7d61ace1eba9a61c459cf3c93e45083cb784032d709343f6143259f33c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 720, + 667, + 800, + 718 + ], + "page_idx": 0 + }, + { + 
"type": "image", + "img_path": "images/7a1fb4d42aae0b909975ed02636653475ab979e238853e1d19e586ee9b214afa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 803, + 667, + 883, + 731 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/29e6e033eba6f9c828268ab3603ad2247905ec18441924873e78e01090deabba.jpg", + "image_caption": [ + "Figure 1. The top left illustrates our universal image generation framework based on visual in-context learning. Given one query of a specific task, the generative model learns the task by observing a few in-context examples presented as demonstrations. For each task, the generation result is indicated by a red box." + ], + "image_footnote": [], + "bbox": [ + 114, + 738, + 207, + 810 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ae71def7d7afd6993f212b48147ff76d77f6b67ba677957013ffa307143b088f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 741, + 295, + 779 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b9e915ffc563ac1292d609bb50db8a3d661d89781b64ad46406c4a4066ae6ec0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 302, + 741, + 388, + 804 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e99f16afe43814fda782234d7d661493d65d0f8193c42181a2fb8646da77dfbb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 741, + 486, + 810 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1263fd0d3b503ab31990285eaa05a8492e113ec5ee63b7f9025a558be400f16f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 738, + 591, + 779 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7c4061b0dc13b470d7a84ab421c7b751f3730815bec422d66c175f2230d760b1.jpg", + "image_caption": [ + "Multi-View" + ], + "image_footnote": [], + "bbox": [ + 514, + 779, + 566, + 799 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": 
"images/1df33f9215c95c3263f0399bb5306ada5f1da2b9580d1f7258ea97c667a0082c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 616, + 741, + 681, + 806 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a0b9804799dd95f5f5a5f8a400d728989ad5e77cd0039295b88a70967f9e46bb.jpg", + "image_caption": [ + "Multi-View" + ], + "image_footnote": [], + "bbox": [ + 699, + 739, + 771, + 797 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8a4ca149908f1c3dc7ae4543913cbbac86677dfbc0b3e6595b8b3db7216b1fb3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 815, + 741, + 883, + 801 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07960v3 [cs.CV] 14 Dec 2025", + "bbox": [ + 22, + 276, + 57, + 724 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/df8c1d05aa538cfaab171fbf8a43d859274becad33105a818a882f24e87d6ac4.jpg", + "image_caption": [ + "Visual Prompt" + ], + "image_footnote": [], + "bbox": [ + 106, + 119, + 176, + 162 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4c8e89482096035b9b67bf8d9f75fde004d8898eef458d3627c5b17cc693e0a4.jpg", + "image_caption": [ + "Without In-context Example", + "Visual Prompt" + ], + "image_footnote": [], + "bbox": [ + 179, + 119, + 251, + 162 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a93592ea83277e4116662db6789d092b4506ba785c49b5ef69f3ece170981951.jpg", + "image_caption": [ + "Target" + ], + "image_footnote": [], + "bbox": [ + 254, + 119, + 325, + 162 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/225435a613094edea23cdf30cd5aef8ff20c0846d6767e1c720739f28dcfdda9.jpg", + "image_caption": [ + "+One In-context Example" + ], + "image_footnote": [], + "bbox": [ + 107, + 196, + 176, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": 
"images/7ffa1029cfcefa94e3cf5bdaf17569e7ed77473937a42d10f5327d8ba5f3baa4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 196, + 250, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2aae421c0d97f7367323bd96a3b60c8ad03dff85be0351db327a74916b1c7eb7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 254, + 196, + 323, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bbde4e344c92cdffbb5d6d627f513b6db53c242132d666b089c56062905f8f21.jpg", + "image_caption": [ + "Visual Prompt", + "face, to generate [IMAGE3] that faces the center of the lens. The last row is: making [IMAGE1] the standing woman the final row is: the woman's frontal face that faces the center of the lens. [IMAGE2] sit down and give the thumbs up. Figure 2. Unseen Tasks : Generalizing to tasks unseen during training via in-context learning. More in-context examples lead to more accurate results." + ], + "image_footnote": [], + "bbox": [ + 106, + 252, + 176, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6e7c4b50a411b64ea2fa9b3901aa00c73ad2e08eb154636134429d2e7293b86d.jpg", + "image_caption": [ + "Visual Prompt" + ], + "image_footnote": [], + "bbox": [ + 181, + 252, + 250, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/49e8c714009dcb15c30f842353e6790b503d26f066686ad9343c23d792f474af.jpg", + "image_caption": [ + "Target" + ], + "image_footnote": [], + "bbox": [ + 254, + 252, + 325, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9b0e95ce6b5f659443d087b5d3ecac99c7d060cbfb474e9fdbe6bee5a539dfa4.jpg", + "image_caption": [ + "+ Two In-context Examples" + ], + "image_footnote": [], + "bbox": [ + 343, + 140, + 411, + 193 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1adeb98c09571df20505a5a4f0305250bc8edd084d989b33f63c66a87587c7b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 416, + 140, 
+ 483, + 193 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/865ed759eb5d007006a5967c548a618725dba0bc159ec85228032aa3ce5813b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 140, + 558, + 193 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/590911237aefab4740b5c175aafe250be5092b518e279faa3523a69cb26b5770.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 196, + 411, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2d0bebf1658a821fcc6e82cac71ba626d69b22bdfd41b0163b1654d23b35e58b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 416, + 196, + 483, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/35101d5b91c55b650a4ca2e9d86bb2cb516b3928546e9cc30ced8b9e1acfd56c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 196, + 558, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1ced14dcdb0f7655cb23c43ba76afe931b2f069d0003e9406fbb4efecf6621b0.jpg", + "image_caption": [ + "Visual Prompt" + ], + "image_footnote": [], + "bbox": [ + 341, + 252, + 411, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/de40888ebddd978640967ce3e0a2098836df789420975c6582e6a62980f7cb49.jpg", + "image_caption": [ + "Visual Prompt" + ], + "image_footnote": [], + "bbox": [ + 416, + 252, + 483, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/8cee65b550f76526a0ec36b3d610f2bca88bd275c26b1b73a5aa107579965a84.jpg", + "image_caption": [ + "Target" + ], + "image_footnote": [], + "bbox": [ + 488, + 252, + 558, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/87975a69a7cfa15925ae1c6c60a37382b5ca478a877f5b0170fd190aa93a84c6.jpg", + "image_caption": [ + "Without In-context Example", + "Visual Prompt" + ], + "image_footnote": [], + "bbox": [ + 584, + 118, + 656, + 162 + ], + "page_idx": 1 + }, + { + "type": "image", + 
"img_path": "images/0011099c4f11710feccc82686f0023255f8adccc3a9a19fce321b9d65917a02a.jpg", + "image_caption": [ + "Target", + "+ Two In-context Example" + ], + "image_footnote": [], + "bbox": [ + 658, + 118, + 728, + 162 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/82142d68ddec7246b19ed5b8d35074aabe6ba13595fe20130c05a1bb064bb661.jpg", + "image_caption": [ + "+ One In-context Example" + ], + "image_footnote": [], + "bbox": [ + 584, + 196, + 655, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6bbda712bd7907bc495deaf542b4fbea3eb5d4414397d6b8f495eb112c6b6403.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 660, + 196, + 727, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5f64dd49c40006e0644a0d6cb82c0a6b7c54d01053b61838f7c4a4e3da0c8672.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 746, + 140, + 815, + 191 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4524ba90197d6faf3924f2fa848409b267d2ba9b74ae737bf113379c1d8f9fb6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 821, + 140, + 888, + 191 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0f2678607e6c892270344d986bc5156a63dca9f44988435e90886ea70b2ba0ed.jpg", + "image_caption": [ + "Visual Prompt", + "Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: making [IMAGE1] the standing woman [IMAGE2] sit down and give the thumbs up." 
+ ], + "image_footnote": [], + "bbox": [ + 584, + 251, + 655, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6ef7988092c3f099f7c744d99b5798d8e128ea6928adf080cd75abb614fc6081.jpg", + "image_caption": [ + "Target" + ], + "image_footnote": [], + "bbox": [ + 660, + 251, + 727, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/62a0321b2defdca71a8991beaa7c0d9246db575e1936a7f07e2aa3ff4255ef5d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 748, + 195, + 816, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0a8c11abc1d3585d2309684f2424ffa5cfc40ddeb9c7ebcd550291520024d036.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 821, + 196, + 888, + 248 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7725e502055d67d3754bb68f8865a468733e96410a1f407141a16aff82504871.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 746, + 251, + 818, + 295 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1f186305b4bd17875edafaef1ce3e6238205a54a85f06cae50a1c0fc8d34d92f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 821, + 251, + 890, + 295 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 398, + 325, + 414 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent progress in diffusion models significantly advances various image generation tasks. However, the current mainstream approach remains focused on building task-specific models, which have limited efficiency when supporting a wide range of different needs. While universal models attempt to address this limitation, they face critical challenges, including generalizable task instruction, appropriate task distributions, and unified architectural design. 
To tackle these challenges, we propose VisualCloze, a universal image generation framework, which supports a wide range of in-domain tasks, generalization to unseen ones, unseen unification of multiple tasks, and reverse generation. Unlike existing methods that rely on language-based task instruction, leading to task ambiguity and weak generalization, we integrate visual in-context learning, allowing models to identify tasks from visual demonstrations. Meanwhile, the inherent sparsity of visual task distributions hampers the learning of transferable knowledge across tasks. To this end, we introduce Graph200K, a graph-structured dataset that establishes various interrelated tasks, enhancing task density and transferable knowledge. Furthermore, we uncover that our unified image generation formulation shared a consistent objective with image infilling, enabling us to leverage the strong generative priors of pre-trained infilling models without modifying the architectures.", + "bbox": [ + 89, + 431, + 483, + 810 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 816, + 220, + 832 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent advancements in image generation, propelled by the progress of diffusion models [15, 33, 88], have led to a", + "bbox": [ + 89, + 843, + 483, + 873 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "wide range of applications, including image editing [69], style transfer [64, 81], virtual try-on [11, 12], and personalized generation [38, 54], among others. However, these tasks typically require task-specific models, which limit efficiency and scalability for real-world applications. In recent years, there has been growing interest in universal generative models [27, 39, 44], aiming to handle diverse image generation tasks, even unseen ones, within a single unified framework. 
Despite significant progress, some critical issues remain to be addressed, such as (1) distinguishable and generalizable task instruction, (2) comprehensive task coverage during training, and (3) a unified model architecture.", + "bbox": [ + 511, + 401, + 906, + 582 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "An ideal task instruction is crucial for guiding the model to process the desired task effectively. Existing methods primarily rely on language instructions [27, 44] or task-specific tokens [39] to distinguish the task to be performed. However, the complexity of visual tasks and the inherent gap between vision and language modalities make it hard for the model to understand language-only task descriptions, which leads to task confusion [39] and hinders generalization on unseen tasks [35, 71]. Moreover, pre-learned task-specific tokens constrain the model only to handle seen tasks. In contrast, large language models (LLMs) have successfully achieved unified multi-task modeling, partially due to the rise of in-context learning [5], which allows models to adapt various tasks using only a few demonstrations. We aim to replicate the concept of in-context learning in the pure visual modality, where the model learns the desired task directly from a few visual examples as task demonstrations, as shown in Fig. 1 (Left Top). In this setting, in-context learning shows strong potential for universal image generation. We summarize four key findings: (1) it supports various in-domain tasks with reduced task ambiguity (Fig. 
1);", + "bbox": [ + 511, + 583, + 908, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "* Equal contribution", + "bbox": [ + 114, + 887, + 227, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "Corresponding author", + "bbox": [ + 246, + 887, + 367, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4524b24592705bd243982985dfcf7b75d8aa81c12f80b430ba50f7f3f1856fe8.jpg", + "image_caption": [ + "Figure 3. Unseen Tasks: Leveraging in-context learning to unify multiple seen tasks into a single-step unseen task. Left: Unifying the [Depth to Image] and [Relighting] task into a single [Depth to Images with Various Lighting] task. Right: Unifying multiple dense prediction tasks into a joint prediction task. Results without visual context can be found in the appendix." + ], + "image_footnote": [], + "bbox": [ + 99, + 90, + 424, + 296 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/74cf25e75f12bfa369263343b948539488e43676d1cdc73d00248e8230e91f3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 442, + 93, + 903, + 296 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "(2) it generalizes to unseen tasks (Fig. 2, Fig. 8); (3) as an unseen strategy for task unification, it can integrate multiple sub-tasks into a single step and generate intermediate results (Fig. 3); (4) it enables reverse generation, i.e., inferring a set of conditions from a given target (Fig. 9). 
While prior works [1, 3, 4, 43, 66, 71, 82] have also explored in-context learning in vision, they are largely constrained to specific domains (such as dense prediction or style transfer [67, 87]), or simplified generation settings involving only one condition and one target image [43, 60].", + "bbox": [ + 88, + 353, + 482, + 505 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "From the perspective of task distribution, visual tasks are inherently sparse compared to those in natural language processing because task-specific datasets [71, 85] for different tasks have minimal overlap [19, 32, 79]. Such sparse task learning isolates the knowledge of each task and limits the model from learning shared features across tasks. Moreover, the weak correlations between tasks hinder knowledge transfer and adaptability to new tasks. However, existing works in multi-task learning [10, 16, 31, 53] have verified the benefits of overlapping knowledge across related tasks. To alleviate the sparsity of visual tasks, we introduce a graph-structured dataset, Graph200K, where each image is associated with annotations spanning five metatasks, i.e., conditional generation [80], IP preservation [76], style transfer [81], image editing [69], and restoration [77]. By combining different conditions, we train the model with a variety of tasks that overlap with each other. Given this highly overlapping and compact task space, our dataset significantly increases task density, allowing the model to learn shared and transferable knowledge more effectively.", + "bbox": [ + 91, + 506, + 482, + 808 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For the architecture design, it is essential to 1) accommodate flexible task formats [27, 35, 71], ensuring seamless in-context learning, and 2) remain compatible with state-of-the-art models [33, 88] to fully leverage their strong generative priors. 
In this work, we find that the state-of-the-art image infilling model [33] has a consistent objective with our", + "bbox": [ + 89, + 810, + 482, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "in-context learning based universal generative formulation. Specifically, we concatenate all input and output images together, where the objective of a task is to fill the output area. This alignment enables us to build our model upon advanced general-purpose infilling models without additional modifications, achieving powerful universal generation capabilities with minimal data and training costs.", + "bbox": [ + 511, + 354, + 906, + 459 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this work, we propose a universal image generation framework, VisualCloze, which fine-tunes FLUX.1-Filldev [33] with interrelated tasks sampled from Graph200K to learn transferable knowledge and support visual in-context learning. As the number of in-context examples increases, we observe enhanced performances and reduced task confusion, enabling the model to support a broad spectrum of in-domain tasks, including conditional generation, image restoration, editing, style transfer, IP-preservation, and their combinations. On unseen tasks, the model also shows a certain degree of generalization ability, as shown in Fig. 2. 
In summary, our main contributions are as follows:", + "bbox": [ + 511, + 460, + 908, + 641 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose an in-context learning based universal image generation framework that supports a wide range of indomain tasks and exhibits generalization to unseen ones.", + "- We design a graph-structured dataset, Graph200K, which constructs a compact task space, enabling flexible online task sampling and promoting the models to learn shared and transferable knowledge across tasks.", + "- Our unified image generation formulation shares a consistent objective with the state-of-the-art infilling model, enabling exceptional performance through minimal tuning without modifying the structure." + ], + "bbox": [ + 511, + 642, + 903, + 808 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 513, + 821, + 653, + 837 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1. Image Generation", + "text_level": 1, + "bbox": [ + 513, + 847, + 689, + 863 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent advances in text-to-image generation have achieved remarkable performance, largely driven by the development", + "bbox": [ + 511, + 869, + 906, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "of autoregressive models [41, 58, 78] and diffusion models [2, 13, 15, 18, 24, 40, 42, 48, 51]. Among these, rectified flow transformers [15, 17, 33, 88] have shown great training efficiency and overall performance. Building on these foundational models, diverse applications have emerged, such as conditional generation [80], style transfer [64], and personalized generation [38]. More recently, universal models that address various tasks [35, 44, 83] have been explored. 
For example, unified models like OmniGen [71] leverage large vision language models to consolidate multiple tasks into a single framework. Similarly, UniReal [9] unifies image generation tasks as discontinuous video generation. However, they still face issues such as over-reliance on language instructions, isolation and sparsity of visual tasks, and architecture design accommodating flexible task formats. To address these issues, we propose a universal image generation framework that unifies generation tasks as image infilling. Through visual in-context learning and our Graph200K dataset that constructs a denser task space to learn transferable knowledge, our method alleviates ambiguity to support a diverse set of in-domain tasks and generalizes to tasks unseen during training.", + "bbox": [ + 89, + 90, + 483, + 424 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2. Visual In-context Learning", + "text_level": 1, + "bbox": [ + 89, + 460, + 334, + 478 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Along with the emergence of large language models, such as GPT-3 [5], in-context learning [14] has been an effective approach to allow the language model to understand and perform complex tasks given a few demonstrations. Early works [21, 22] in vision modality propose image analogies to create an image filter from examples automatically. In recent years, leveraging inpainting model [3, 4, 82], masked image modeling [43, 66, 67], or vision-language model [1, 86], visual in-context learning is proposed to handle more tasks. However, they mainly focus on dense prediction [55, 59, 87] or visual understanding [63]. OmniGen [71] also leverages in-context learning to generalize to unseen domains, e.g., segmenting unseen concepts when the model has learned the segmentation task during training. However, it mainly focuses on simple tasks of dense prediction, and the gap between the unseen and training domains is still limited. 
Some recent works [34, 43, 60, 68] extend visual in-context learning to image generation, but they are still limited by simple tasks such as conditional generation and dense prediction. Moreover, the sparsity of visual tasks makes it difficult for models to learn transferable and overlapping knowledge across tasks, limiting the generation ability of in-context learning. In contrast, we introduce a graph-structured dataset that supports interrelated tasks and thus constructs a more dense task space, promoting the model to learn shared and transferable knowledge and enhance its adaptability.", + "bbox": [ + 89, + 493, + 483, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/1732fc2ed7efbba343de0423288c803466a9f0e9b719b20d684c15538ef8510e.jpg", + "image_caption": [ + "Figure 4. Illustration of the proposed Graph200K dataset. Each image is annotated for five meta-tasks, i.e., conditional generation, image restoration, image editing, IP preservation, and style transfer. Using these tasks, we can combine a wide range of complex tasks, such as the bottom of the figure." + ], + "image_footnote": [], + "bbox": [ + 517, + 90, + 908, + 406 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Dataset", + "text_level": 1, + "bbox": [ + 513, + 518, + 602, + 534 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Recent works [26, 44, 71] have made great progress in unified image generation. However, their generalization to unseen tasks remains highly limited. We partially attribute this issue to the sparsity and isolation of visual tasks, hindering the model from learning shared features across tasks and handling unseen ones. Moreover, weak correlations between tasks further hinder knowledge transfer, restricting the adaptability of models. Therefore, increasing task density or strengthening task inter-relations helps improve the generalization ability of models via a compact task distribution. 
In this paper, we take the Subject200K [61] dataset as a starting point and construct our Graph200K dataset by augmenting each image with 49 types of annotations spanning five meta-tasks. This enriched annotation space enables flexible construction of a wide range of related tasks by sampling and combining arbitrary subsets of annotations across different meta-tasks, as illustrated in Fig. 4.", + "bbox": [ + 511, + 545, + 906, + 801 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Graph-Structured Multi-Task Dataset", + "text_level": 1, + "bbox": [ + 511, + 816, + 841, + 832 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In natural language processing, tasks overlap significantly, facilitating strong cross-task learning ability. In contrast, visual tasks are inherently distinct, posing challenges for vision models to achieve similar generalization ability via", + "bbox": [ + 511, + 839, + 908, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "instruction tuning. To ease this issue, we introduce a Graph-Structured Multi-Task Dataset. As illustrated in Fig. 4 (a), given a text-to-image dataset, each image is treated as the central node of a graph, around which diverse task annotations are constructed, including those for various spatial conditions, degradations, image editing results, reference image for IP-preservation, and style transfer with various reference styles. The construction process for each task pair is detailed in the next section.", + "bbox": [ + 89, + 90, + 480, + 224 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Fig. 4, each task annotation forms a bidirectional edge with the image. Thus, the graph is strongly connected, which means that for any two nodes, bidirectional paths exist between them. In other words, a generation task can be formulated as a path within the graph. 
The nodes along a path (except the end node) serve as condition images, which is analogous to the question in instruction fine-tuning, while the target image (the end node) plays the role of the answer. Specifically, there are 49 types of nodes in our Graph200K, and we sample up to 134 highly overlapping tasks, making the model learn more compact and shared representations across tasks. Moreover, it enriches the diversity and flexibility of our instruction fine-tuning data. For example, the path reference $\\rightarrow$ editing $\\rightarrow$ image corresponds to the task of image editing with reference, as shown in Fig. 4 bottom.", + "bbox": [ + 91, + 229, + 486, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. Dataset Construction", + "text_level": 1, + "bbox": [ + 89, + 484, + 290, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For convenience, we inherit subject-driven data from the Subjects200K [61]. Additionally, 32 different degradations are applied online to the images to acquire restoration data. We summarize the data construction methods in this section for the remaining three tasks.", + "bbox": [ + 89, + 507, + 482, + 583 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Conditional generation. Each image is paired with 12 distinct conditions generated by specialized models, including canny edges [6], HED edges [72], Hough lines [20], semantic segmentation maps [37], depth maps [74], shape normal maps [73], and human keypoints [7], following ControlNet [80]. This work extends the conditions by incorporating SAM2 [50] masks, foreground segmentation, and open-world boxes and masks. The foreground segmentation, derived from the RMBG [84], supports diverse tasks such as inpainting and foreground extraction. 
Open-world bounding boxes are generated through the grounding caption capability of Qwen2-VL [65], which are processed using SAM2 [50] to produce corresponding masks.", + "bbox": [ + 89, + 598, + 482, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Style transfer. We transfer the style of images according to reference in both semantic-variant and semantic-invariant settings. Specifically, the semantic-invariant transfer adopts InstantStyle [64] to preserve the semantic content, while the semantic-variant transfer relies on FLUX.1-Redux-dev [33], using the style embeddings and depth as", + "bbox": [ + 89, + 809, + 482, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "conditions. For each image, we randomly generate five stylized versions. Mixing the two tasks pushes the model to follow the in-context examples better to avoid ambiguity.", + "bbox": [ + 511, + 90, + 906, + 137 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Image editing. We design two types of editing tasks, including background-variant and background-invariant editing. The background-invariant editing begins with localizing the subjects. Then, we leverage a large vision-language model, Qwen2-VL [65], to modify the image caption with a new object that replaces the original subject. The image, with the subject masked, is subsequently processed by the FLUX.1-Fill-dev [33] inpainting model to integrate the alternative object into the masked region. The above operation is repeated five times to enrich the dataset. For background-variant editing, the difference lies in the last step, which utilizes FLUX.1-Redux-dev [33] with depth as the condition and the modified caption as the text prompt.", + "bbox": [ + 511, + 148, + 906, + 347 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. 
Other Data", + "text_level": 1, + "bbox": [ + 511, + 354, + 638, + 371 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To further expand the range of tasks and enhance the generalization ability of models, we incorporate several open-source datasets during training, including VITON-HD [11] for virtual try-on and PhotoDoodle [28] for artistic image editing. For image editing tasks, we also extend the dataset with OmniEdit [69]. Specifically, two sub-tasks, i.e., object addition and removal, are used for training. The other editing tasks, such as attribute modification and environment change, are treated as unseen tasks to assess the generalization ability of the trained model. Furthermore, we leverage a portion of high-quality internal data, covering tasks of the drawing process [62] and multi-view generation [29].", + "bbox": [ + 511, + 377, + 908, + 559 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Method", + "text_level": 1, + "bbox": [ + 511, + 571, + 606, + 588 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This paper identifies the core challenges in building a universal image generation model, including the need for a clearly defined and generalizable task formulation, visual task sparsity, and the lack of a unified framework for multi-task learning. In the previous section, we addressed the issue of task sparsity by constructing the compact Graph200K dataset. Sec. 4.1 introduces visual in-context learning as the ideal paradigm for universal task formulation. Afterward, Sec. 4.2 considers the image infilling model a unified multi-task framework, achieving strong generalization capabilities with minimal cost.", + "bbox": [ + 511, + 597, + 908, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. 
Visual In-context Learning", + "text_level": 1, + "bbox": [ + 511, + 773, + 758, + 789 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Language instructions are usually used to specify the generation definition to handle multiple visual generation tasks with a single generative model. However, due to the gap between vision and language, the text comprehension ability of image generation models remains limited. This issue leads to task confusion [39] in existing universal generative models and weak generalization to unseen tasks. Inspired", + "bbox": [ + 511, + 795, + 908, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1358ebd9de822d6bba28037c85b8d86380df5a48d034a0f61ea62114365d94fe.jpg", + "image_caption": [ + "Figure 5. Concatenating images when applying position embeddings. The $L$ images within $C$ in-context examples and the query are first concatenated horizontally. Then, these concatenated rows are concatenated temporally to handle mismatched aspect ratios." + ], + "image_footnote": [], + "bbox": [ + 102, + 97, + 483, + 256 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "by the success of few-shot learning on large language models [5], we recognize that visual context may serve as a more friendly task instruction for visual generative models, given their superior visual understanding capabilities.", + "bbox": [ + 89, + 354, + 482, + 415 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Therefore, in this paper, we re-propose visual in-context learning to build a universal and generalizable image generation system. 
For the sake of description, here we assume the image input-output of arbitrary conditional generation task as a query consisting of $L - 1$ condition images and a blank target $\\varnothing$ to be completed by the model, i.e., $X = \\mathrm{concat}(\\{x_1,\\dots ,x_{L - 1},\\emptyset \\})$ . In Sec. 5.1, we demonstrate that our method can be extended to more general scenarios, where it can generate images at arbitrary positions and in any quantity rather than just the single image at the end of the query. During training, we randomly provide up to $C$ in-context examples, each containing $L$ images as the query. This strategy ensures the generalization ability of models across different numbers of in-context examples. In our experiments, we show that providing in-context examples as task demonstrations not only helps alleviate task confusion and boost model performance across in-domain tasks [39], but also enhances the generalization ability on unseen tasks.", + "bbox": [ + 89, + 415, + 483, + 702 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Unified Multi-task Framework", + "text_level": 1, + "bbox": [ + 89, + 712, + 364, + 727 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Unlike previous visual in-context learning methods that primarily focus on scenarios with a single image condition and a single context [43, 60], in this work, we aim to construct a unified framework capable of handling varying numbers of conditions and contexts, allowing for flexible adaptation to diverse tasks. For ease of description, we first assume all images processed by the model share the same size, $W \\times H$ , and we extend to the scenario with mismatched aspect ratios at the end of this section. 
In this way, given $C$ in-context examples and the query, each containing $L$ images, all images can be concatenated into a complete grid-Layout image", + "bbox": [ + 89, + 734, + 483, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "with a size of $(L\\times W,(C + 1)\\times H)$ . Then, the model can complete a task by infilling the target grids based on the surrounding context, akin to solving visual cloze puzzles. Therefore, we build our unified framework, VisualCloze, based on the general image infilling architecture capable of handling multiple resolutions.", + "bbox": [ + 511, + 90, + 903, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Consistent with common diffusion-based infilling model designs, our model can be formulated as follows:", + "bbox": [ + 511, + 181, + 903, + 212 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {X} = f (X \\mid T, M), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 642, + 218, + 903, + 237 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $X$ is the concatenated image, with the last grid left blank, $T$ is the language instruction, $M$ is the mask condition, and $\\hat{X}$ represents the inflated result. The mask $M$ is a binary matrix with the size of $(H \\times (C + 1), W \\times L)$ :", + "bbox": [ + 511, + 243, + 903, + 305 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nM (i, j) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} i \\in [ H \\times (C - 1), H \\times C) \\\\ & \\text {a n d} j \\in [ W \\times (L - 1), W \\times L), \\\\ 0 & \\text {o t h e r w i s e}, \\end{array} \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 531, + 311, + 903, + 367 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $M(i,j) = 1$ indicates that the pixel will be masked and generated by the infilling model. Equ. (2) masks the region in the last row and column, i.e., the target image. 
During training, we also randomly mask one of the first $L - 1$ grids with a probability of 0.5, promoting reverse generation shown in Sec. 5.1. For the inference stage, we can crop $\\hat{X}$ to obtain the target image easily.", + "bbox": [ + 511, + 375, + 906, + 481 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Aligned optimization objective. A key benefit of this design is that our VisualCloze formulation shares a highly consistent objective with general image infilling models without architectural modifications or explicit input conditions. This consistency allows us to directly fine-tune advanced image infilling models using the newly constructed dataset while maximizing the utilization of the prior knowledge of foundation models. In contrast, existing task-specific models often require introducing additional learnable modules [38, 69] or adapting to extra condition inputs [61], which may compromise the native capabilities of the model.", + "bbox": [ + 511, + 494, + 906, + 676 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Language instructions. Note that the design of language instruction is also necessary for VisualCloze because it is responsible for defining the grid image layout, describing the caption of the image to be generated, and specifying the task intent when in-context examples are unavailable. In our unified framework, the instruction consists of three parts: (1) layout instruction, which describes the $(C + 1)\\times W$ layout of the grid image; (2) task instruction, which specifies the task type; and (3) content instruction, which describes the content of the target image. The details about the instructions are available in Appendix A. By restructuring the three components $X$ , $T$ , and $M$ in Equ. 
(1), we achieve a unified multi-task framework for image generation with the general image infilling paradigm and support in-context learning.", + "bbox": [ + 511, + 689, + 906, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Positional embedding. In the preceding section, all images are concatenated into a grid layout image and we can apply positional embedding (i.e., RoPE [57]) on this large image. However, a potential limitation lies in composing a grid image from in-context examples with varying aspect ratios. To overcome this issue, we leverage the 3D-RoPE in Flux.1-Fill-dev to concatenate the query and in-context examples along the temporal dimension, as shown in Fig. 5, effectively overcoming this issue without introducing any noticeable performance degradation.", + "bbox": [ + 89, + 90, + 483, + 243 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Implementation Details", + "text_level": 1, + "bbox": [ + 89, + 251, + 308, + 268 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We use FLUX.1-Fill-dev [33] as our foundation model, considering its outstanding performance among open-source image infilling models. In this work, LoRA [25] is chosen to fine-tune the model instead of fully fine-tuning it to reduce training costs and preserve the capabilities of the foundation model. The resulting LoRA can also be fused with other LoRAs in the community, enabling more widespread applications. Specifically, we set the rank of LoRA as 256. The model is tuned for 20,000 iterations with an accumulated batch size of 64 on $8 \\times \\mathrm{A}100$ GPUs. We employ the AdamW optimizer with a learning rate of $1e^{-4}$ . Following FLUX.1-Fill-dev, we incorporate the lognorm noise strategy with dynamic time shifting. During training, the number of in-context examples is set up to 2 (i.e., $C$ as defined in Sec. 
4.2), while $L$ , the number of images involved in a task, varies between 2 and 4 in the Graph200K dataset. During inference, the number of in-context examples can be generalized to a larger number. To balance computational efficiency, each image is resized to the area of $384 \\times 384$ or $512 \\times 512$ before concatenating them into a grid layout. High-resolution outputs can be obtained in practical applications through simple post-up-scaling techniques [45].", + "bbox": [ + 89, + 273, + 483, + 608 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 89, + 619, + 223, + 636 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1. Qualitative Analysis of In-context Learning", + "text_level": 1, + "bbox": [ + 89, + 643, + 460, + 662 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This section presents a series of experiments demonstrating the effectiveness of in-context learning across different tasks, especially those unseen during training. Based on our extensive experiments, we summarize four key findings that highlight the role of in-context learning.", + "bbox": [ + 89, + 666, + 483, + 742 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In-Context Learning Findings 1", + "text_level": 1, + "bbox": [ + 117, + 753, + 328, + 768 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In-context learning can mitigate task confusion for seen tasks.", + "bbox": [ + 114, + 780, + 457, + 809 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Task ambiguity on seen tasks. The model occasionally experiences task confusion, failing to interpret the intended objective accurately, especially on dense prediction tasks. 
In-context learning effectively alleviates this issue", + "bbox": [ + 89, + 839, + 483, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8f626ef355e0acb77913acad04167e89c0c0c4c5f84de1f1c391827b8f9846db.jpg", + "image_caption": [ + "(a) Image to Pose" + ], + "image_footnote": [], + "bbox": [ + 514, + 85, + 905, + 143 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/89478554e2624c03002c38dc0b0b3797005ab2fb58fff9224ad2b02c4a50e563.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 161, + 903, + 218 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1ee7c7cb4e3fa3c1f80c8307b2f125ca56115352f10a661f2a448b98d7733511.jpg", + "image_caption": [ + "(b) Image to Depth", + "(c) Image to Edge" + ], + "image_footnote": [], + "bbox": [ + 514, + 233, + 903, + 291 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/799d110235e6099c3b897aad35734e877d65549aefaf1c26cba0f9dbc70c90cf.jpg", + "image_caption": [ + "(d) Normal to Image", + "Figure 6. In-context learning mitigates the task ambiguity in seen tasks. We show three results using different initial noises." + ], + "image_footnote": [], + "bbox": [ + 514, + 304, + 903, + 359 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "by providing task-specific demonstrations. For example, in Fig. 6 (a) and (c), the model may produce noisy results without in-context examples in pose estimation and edge detection, while increasing the number of in-context examples enhances the performance and stability. In depth estimation shown in Fig. 6 (b), in-context examples also improve the accuracy when the model originally makes inaccurate estimates, especially in distant areas. Additionally, in some tasks like conditional generation, we note that the model can generate satisfactory results stably even without in-context examples, as shown in Fig. 6 (d). However, the quantitative comparison in Tab. 
1 still shows that using in-context learning can further improve the accuracy of task completion.", + "bbox": [ + 511, + 450, + 906, + 647 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In-Context Learning Findings 2", + "text_level": 1, + "bbox": [ + 540, + 659, + 751, + 674 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In-context learning supports generalization to unseen tasks, where providing more in-context examples could lead to more accurate generation.", + "bbox": [ + 537, + 686, + 879, + 732 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Generalization on unseen tasks. Beyond mitigating task confusion, in-context learning also enables the model to generalize to tasks unseen during training. Fig. 2 has shown the model can successfully generate frontal faces from side-view images and transfer editing instructions [8] through in-context learning, even though they are not encountered during training. Here, we present additional examples of unseen tasks. For instance, although the model is trained exclusively on image editing tasks involving object addi", + "bbox": [ + 511, + 763, + 906, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4ea29b7e8c65442528e8fbcb8620b04d0255186bc45e1f44ad9998ad16841a57.jpg", + "image_caption": [ + "Task Prompt: In each row, a logical task is demonstrated to achieve [IMAGE2] a high-aesthetic image based on [IMAGE1] an aesthetically pleasing photograph. Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: change the setting to a winter scene. 
<\\editing instruction>" + ], + "image_footnote": [], + "bbox": [ + 99, + 84, + 290, + 273 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a3d07fda5632ede382d0cef080fcaa8eead3e5397ac707336b1f1b0e9833199d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 302, + 84, + 490, + 273 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/987f48cc4fce95f082ca19623a70e2a6cdfc2c6aaacc2e358a28b72a09978326.jpg", + "image_caption": [ + "Task Prompt: In each row, a logical task is demonstrated to achieve [IMAGE2] a high-aesthetic image based on [IMAGE1] an aesthetically pleasing photograph. Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: turn the color of sunglasses to green. " + ], + "image_footnote": [], + "bbox": [ + 504, + 84, + 699, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ff981a1756b7d4668df52429e2d3439d06ab30300cd9a9f1a69f32855e7aac24.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 709, + 83, + 897, + 276 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/15e8bf13c9a2330b000578b4431ba8c8b856240daefb6265a36cbff561e2c67d.jpg", + "image_caption": [ + "Figure 7. Unseen Tasks: Although the image editing tasks seen by the model are only about object addition and object removal, it can still generalize to other types of editing tasks, such as environment modification (Left) and attribute transformation (Right), through in-context learning. More unseen tasks are shown in Fig. 2." + ], + "image_footnote": [], + "bbox": [ + 96, + 387, + 279, + 566 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a8cebf66282c6a6d1bb8700a95d9f356e6055f9f9410deb217518b384a3a6b78.jpg", + "image_caption": [ + "Figure 8. 
Unseen Tasks: VisualCloze is capable of performing multi-subject driven generation [70], even though the model was only exposed to single subject-driven generation tasks during training. Best viewed by zooming in." + ], + "image_footnote": [], + "bbox": [ + 290, + 387, + 477, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tion and removal, it still generalizes to other types of editing tasks, such as environment changes and attribute modifications, as shown in Fig. 7. Furthermore, as demonstrated in Fig. 8, the model, trained solely on single-subject generation, can generate images preserving identities of multiple subjects. These results highlight that in-context learning is an effective guidance mechanism, enabling adaptation to novel tasks without retraining.", + "bbox": [ + 89, + 667, + 482, + 789 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In-Context Learning Findings 3", + "text_level": 1, + "bbox": [ + 117, + 811, + 328, + 828 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In-context learning enables task unification, an unseen strategy that consolidating sub-tasks into a single step and generating intermediate results.", + "bbox": [ + 114, + 839, + 457, + 885 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Multi-task consolidation. Meanwhile, we also find that through in-context learning, we can consolidate multiple tasks into a single execution step, which can be viewed as another form of unseen task. Fig. 3 has shown two examples, where we 1) merge conditional generation and relighting shown on the left and 2) perform depth estimation, surface normal estimation, and edge detection simultaneously shown on the right. Similarly, Fig. 11 illustrates how we can combine multiple conditions for conditional generation to achieve finer control. For instance, generating a portrait based on keypoints provides only rough information about the location and body pose. 
In such cases, contour conditions can be used to control the attributes of other visual elements.", + "bbox": [ + 511, + 391, + 906, + 602 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In-Context Learning Findings 4", + "text_level": 1, + "bbox": [ + 539, + 628, + 751, + 645 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Different in-context learning examples lead to varying effects, where examples that can better convey mission intent can achieve better and more stable generation.", + "bbox": [ + 537, + 656, + 879, + 717 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Varying effects of different in-context examples. Following prior works [46, 52] on the prompt selection, we also find that different in-context examples could impact the generation quality. Specifically, it is crucial that in-context examples provide correct and strong guidance about the task intention. For example, as shown in Fig. 10 (left), when the side faces are more towards the front than in Fig. 
10 (right), the success rate of correctly generating frontal faces has dropped dramatically.", + "bbox": [ + 511, + 763, + 906, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e39dbb03405300be8ff7301a386eae5438f35df95ee9616ca7db3a89cd56f42c.jpg", + "image_caption": [ + "Two In-Context Examples" + ], + "image_footnote": [], + "bbox": [ + 124, + 89, + 220, + 162 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/de94d01ad7f947dfba9731be7dcb4d5870a68b125e991e5c8ac5a7a4d8c2806a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 225, + 89, + 321, + 162 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/d394f8d589edbf416c6d4cc2f58660a637f157bfe6a4370510efd32b80073ad3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 328, + 89, + 426, + 162 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/4ee44494de36fcbc77ba11e187af703a9e635c2d744149ecf1d844c025abc15e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 164, + 220, + 238 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/fa356cf4486c8e710d3cd2b102a59b17210dd2dd3bf6f93b711f8c0981a2c386.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 225, + 164, + 323, + 238 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/e7728427762ce951e655fcb0171f69c18b5b831a8664b2f78df2e160c0273f98.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 328, + 164, + 426, + 238 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/89597e91d21602ff6a99bb7a814a9b6a7ae72aa4a4b9109d0daebd79103fa3bf.jpg", + "image_caption": [], + "image_footnote": [ + "Task Prompt: In each row, a method uses[IMAGE1] gray-shaded depth map with distinct edges, [IMAGE2] Artistically rendered content for generating [IMAGE3] High-definition 
picture in a unique art style." + ], + "bbox": [ + 122, + 241, + 220, + 294 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/85382c353d3a3f90395a83ff0e3ae47e130ef00c2047f726cee366e12d0254f9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 241, + 323, + 294 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/42c0b7385159af3b1c2f9afba48f8e34b14069b6109c13e3b2601e3948a070c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 328, + 241, + 426, + 294 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/498e06995dd22674b4ab75b8ab7f3a35d95bd305b7e863f4d09253954c32a39e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 457, + 89, + 578, + 162 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/62c557f0ce9f35dcb3cc7754d95c24e511eefd8b42b59b819beda1a5208aa49f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 586, + 90, + 681, + 162 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/2b4a07349a76312f2d32216d6c874a02a3535c0ad37c2908e984eb13cb6b7287.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 90, + 785, + 162 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f1b73d86659bf48d8dddbe2eb30df4180c1a1c124db2fc18321bd544c2857b04.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 794, + 89, + 890, + 162 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/7fb4c0fcd85036ac8c873ead412249e9d545ed172f14473177f6964763a4cab4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 482, + 164, + 580, + 237 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/2990a4bc71a56d8004954f8fd263cee7d26ac3fa3581635ac306f3f80eda9c2d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 586, + 164, + 681, + 238 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": 
"images/14138013e9b9475875a02acdbd4b44acb2fbf1da5f2bf9232c5038dd69054d61.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 164, + 785, + 238 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0a0b70f5913e7a1add33c44846032d35427d7d7d84ba92ea0890c986c695058a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 794, + 164, + 890, + 238 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f76662e5dc81ba0b4ab6e409674dd2e1e3a76aed9e84f2152cce4ee9785b542b.jpg", + "image_caption": [], + "image_footnote": [ + "Task Prompt: Every row demonstrates how to transform [IMAGE1] an image with vivid details into [IMAGE2] gray-scale depth map with clear object boundaries, [IMAGE3] rgb normal map for bump mapping effects, [IMAGE4] soft-edged map from hed detection through a logical approach." + ], + "bbox": [ + 480, + 241, + 580, + 305 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/014dfd4d96816ceef7a915c7b8d206169fb9cffce49a5fdf83d9b968c33943a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 586, + 241, + 683, + 305 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/3a00376ddd05ce1a246de4058ad55d3b6f886ddb698692c6e84a05e3651aa199.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 241, + 787, + 306 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/22dae11fb3a08b4f3574437e9a2aa7294c551e476b059191448618fdacb60f8e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 794, + 241, + 890, + 305 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/e26d101199eaf66301f550024e7b7339d17cc4b47263a479a79c314e8bd86ac4.jpg", + "image_caption": [ + "Figure 9. Unseen Tasks: Through in-context learning, we can perform reverse generation from targets to conditions. 
For example, (a) decomposing the layout and style from a stylized image and (b) inferring the image, depth, and surface normal simultaneously from an edge map, which is the reverse task of Fig. 3 (Left)." + ], + "image_footnote": [], + "bbox": [ + 94, + 412, + 274, + 542 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/980324990f838bb09af21c545c77b6a3c430fbecb9f7cb3a9d273e98971dd01f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 412, + 468, + 542 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0b92ba007fe2e0cfea911db6cb5b8efc794bf013ec60d64dbfbae9c61a97329d.jpg", + "image_caption": [ + "Figure 10. Illustration of the impact of different in-context examples on in-context learning. In the second example on the left, the left and right faces are too biased towards the front, so they do not show the core goal of the task intention." + ], + "image_footnote": [], + "bbox": [ + 94, + 556, + 279, + 619 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/db55729e9d4318e9841efd8e93703898949c599ced5c6934a32bdbe22fd9345e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 292, + 556, + 470, + 618 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In-Context Learning Findings 5", + "text_level": 1, + "bbox": [ + 117, + 718, + 328, + 734 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In-context learning can guide bilateral generation, even for the reverse process that is unseen during training.", + "bbox": [ + 114, + 744, + 457, + 791 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Bilateral generation. In addition to generating the target from a set of given conditions, our model also shows the capability of reverse generation, i.e., inferring the underlying conditions from the target. 
Although our model has randomly treated one condition image as the target when", + "bbox": [ + 89, + 825, + 483, + 900 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/80e71d879c8601d4e472bffb296a35879303834a95416596e2faa6f860bb7464.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 526, + 412, + 642, + 503 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/35aef2ef545a9a8891b9ac59dd5f26762d506a0b5d07236fa202dbfa634040af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 414, + 764, + 502 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/c3a5f4d20b1af78021b3d3cd67f5d643151115213ced5cfbaf30a97185d7c53f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 776, + 414, + 890, + 502 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f2d484af116d0c9a2212612f63e0c234e814fabe0c58948af994a5cc1b020c38.jpg", + "image_caption": [ + "Figure 11. Unseen Tasks: Unseen combinations of multiple tasks. For conditional generation, we integrate multiple conditions achieve more precise control. More examples are shown in Fig. 3." + ], + "image_footnote": [ + "Task Prompt: Every row demonstrates how to transform [IMAGE1] human pose with colored lines for bone structure and [IMAGE2] canny map with sharp white edges and dark into [IMAGE3] a visually striking and clear picture through a logical approach." 
+ ], + "bbox": [ + 524, + 505, + 643, + 598 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/28630a84114fa8d401e0e3eaea7021dcfa6d6f65411c23b243684d03f78786c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 650, + 505, + 767, + 598 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/93b3d7c15a34d4cea9903e6ba78d973fed980a709027e508c9d31569037fbc3e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 774, + 505, + 893, + 597 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "training as described in Sec. 4.2, it can generalize to a more challenging and unseen setting during inference, i.e., inferring all conditional images from only the target image. For instance, as illustrated in Fig. 9 (left), the model can reverse-engineer both the original and the style reference images given a stylized image, demonstrating the ability to disentangle the content and style representations. Similarly, as shown in Fig. 9 (right), the model can generate the corresponding real image, depth estimation, and surface normal estimation from an edge image, representing the inverse task of Fig. 3 (left). The ability to perform such", + "bbox": [ + 511, + 734, + 906, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/3490ba6341fe53dae081576bfafd9498c242ee534c5c93aff12f356ab80d5505.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ConditionMethodContextControllabilityQualityText Consistency
F1 ↑RMSE ↓FID [23] ↓SSIM ↑MAN-IQA [75] ↑MUSIQ [30] ↑CLIP-Score [49] ↑
CannyControlNet [80]0.13-46.060.340.3145.4534.10
OminiControl [61]0.47-29.580.610.4461.4034.40
OneDiffusion [35]0.39-32.760.550.4659.9934.99
OmniGen [71]0.43-51.580.470.4762.6633.66
Oursdev00.39-30.360.610.4861.1335.03
Oursfill00.35-30.600.550.4964.3934.98
Oursfill10.36-31.340.550.4964.1234.96
Oursfill20.36-31.150.560.4964.0834.85
DepthControlNet [80]-23.7036.830.410.4460.1734.49
OminiControl [61]-21.4436.230.520.4460.1834.08
OneDiffusion [35]-10.3539.030.490.4960.4934.71
OmniGen [71]-15.0786.080.260.4964.9029.72
Oursdev0-25.0642.140.530.4658.9534.80
Oursfill0-10.3133.880.540.4864.8535.10
Oursfill1-9.9134.440.540.4964.3234.95
Oursfill2-9.6834.880.540.4864.2934.89
DeblurControlNet [80]-37.8253.280.490.4561.9233.80
OminiControl [61]-19.7026.170.850.4560.7034.53
OneDiffusion [35]-------
OmniGen [71]-------
Oursdev0-25.0356.760.740.3846.6833.52
Oursfill0-26.5340.590.740.4659.6234.56
Oursfill1-25.8736.930.760.4861.5834.82
Oursfill2-25.5736.280.760.4861.7734.82
", + "bbox": [ + 91, + 88, + 906, + 483 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/2973fc1c963afae2723902ff6deb0c428c16ec2f262084265fd41eb908c04ca6.jpg", + "table_caption": [ + "Table 1. Quantitative comparison on conditioning generation and image restoration. The methods that train a specialist for each task are marked as gray color. Except for these methods, the best method is bolded, and the second best method is underlined." + ], + "table_footnote": [], + "table_body": "
MethodContextDINOv2CLIP-ICLIP-T
OminiControl [61]73.1787.7033.53
OneDiffusion [35]73.8886.9134.85
OmniGen [71]67.7383.4334.53
Oursdev078.0587.6835.06
Oursfill080.4189.6335.16
Oursfill179.3389.2235.02
Oursfill280.3289.3635.01
", + "bbox": [ + 106, + 532, + 470, + 676 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "reverse tasks highlights the flexibility and robustness in understanding complex relationships between different types of image representations.", + "bbox": [ + 89, + 772, + 482, + 819 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.2. Main Results", + "text_level": 1, + "bbox": [ + 89, + 830, + 230, + 847 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We compare our method with universal generative models, including OmniGen [71] and OneDiffusion [35], as well as specialized models, such as ControlNet [80] and Omni-", + "bbox": [ + 89, + 854, + 483, + 902 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/a120bae33646642c1ee693c6f60b898e6ee0090f7d5c6d025c78f8129c24495c.jpg", + "table_caption": [ + "Table 2. Quantitative comparison for subject-driven image generation. We report clip scores on text alignment and style consistency. Specialists are shaded in gray. Among the remaining methods, the best is emphasized in bold, while the second best is underlined." + ], + "table_footnote": [], + "table_body": "
text↑image↑
InstantStyle [64]0.270.60
OmniGen [71]0.270.52
Oursdev0.300.53
Oursfill0.290.55
", + "bbox": [ + 519, + 532, + 901, + 626 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 3. Quantitative comparison for style transfer. We report CLIP scores on text alignment and style consistency. The specialists are indicated in gray. Among the others, the top-performing one is highlighted in bold, and the second best is underlined.", + "bbox": [ + 511, + 636, + 906, + 694 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Control [61]. The details of the evaluation metrics are provided in Appendix C. Additionally, we fine-tune FLUX.1-dev [33] using the same settings as FLUX.1-Fill-dev for comparison and refer to the tuned models as Oursdev and Oursfill. The details of Oursdev are shown in Appendix B.", + "bbox": [ + 511, + 714, + 906, + 792 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "For conditional generation and image restoration, we evaluate the models based on three criteria, i.e., controllability, visual quality, and text consistency, following the evaluation approach of OminiControl [61]. As shown in Tab. 1, our framework demonstrates comparable controllability to existing universal methods while achieving superior visual quality and text consistency. Compared to spe", + "bbox": [ + 511, + 795, + 908, + 902 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 509, + 936 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/66de4842ae368391271cd943215047905ba91ce955a55fbb50f891d087e2be07.jpg", + "image_caption": [ + "Figure 12. Comparison between Flux.1-dev (Oursdev) and Flux.1-Fill-dev (Oursfill)." 
+ ], + "image_footnote": [], + "bbox": [ + 91, + 84, + 480, + 272 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "cialized methods, our model performs on par with the best results and even outperforms them on the depth-to-image.", + "bbox": [ + 89, + 342, + 482, + 372 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In the style transfer task, we measure text consistency and style alignment using the CLIP [49] model. As reported in Tab. 3, our method outperforms OmniGen [71] by $2\\%$ and $3\\%$ in text alignment and style consistency, respectively. Even when compared with InstantStyle-Plus [81], a specialized model, we achieve a $2\\%$ improvement in text consistency, with only a slight decrease in style alignment.", + "bbox": [ + 89, + 373, + 482, + 478 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Furthermore, we evaluate the models on subject-driven image generation and report semantic alignment using the DINOv2 [47], CLIP-I [49], and CLIP-T [49] scores. Across all these metrics, our method consistently delivers improvements, as shown in Tab. 2. For example, compared to the specialized model OminiControl [61], we achieve improvements of $7.15\\%$ , $1.66\\%$ , and $1.48\\%$ in these three scores.", + "bbox": [ + 89, + 479, + 482, + 584 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Advantages of the infilling model. Our method (Oursfill) is built on FLUX.1-Fill-dev [33], which shares the same objective as our unified image generation framework. To verify its effectiveness, we also fine-tune Fill.1-dev [33] (Oursdev) using identical settings. Unlike Oursfill, which requires no modifications, Oursdev necessitates model adaptations for universal image generation, as shown in Appendix B. Despite its simplicity, Oursfill achieves superior performance across multiple tasks.", + "bbox": [ + 89, + 598, + 482, + 733 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As shown in Tab. 
1, $\\text{Ours}_{\\text{dev}}$ achieves a higher F1 score than $\\text{Ours}_{\\text{fill}}$ in the canny-to-image generation. However, in other tasks, $\\text{Ours}_{\\text{fill}}$ demonstrates a significant advantage. For instance, in the depth-to-image generation, $\\text{Ours}_{\\text{fill}}$ reduces RMSE from 25.06 to 10.31. In the deblurring task, $\\text{Ours}_{\\text{fill}}$ achieves superior quality by lowering RMSE while maintaining a higher SSIM. In subject-driven image generation, Tab. 2 shows that $\\text{Ours}_{\\text{fill}}$ consistently outperforms $\\text{Ours}_{\\text{dev}}$ . Additionally, in semantic-invariant style transfer, $\\text{Ours}_{\\text{fill}}$ delivers comparable performance to $\\text{Ours}_{\\text{dev}}$ , as shown in Tab. 3.", + "bbox": [ + 89, + 734, + 482, + 900 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Fig. 12 presents a visual comparison, where Oursfill demonstrates clear advantages over Oursdev. Notably, in the depth-to-image generation, images produced by Oursdev frequently exhibit diagonal streak artifacts, which significantly degrade visual fidelity. Considering the advantages in performance, visual quality, and architectural efficiency, Oursfill stands out as the superior model.", + "bbox": [ + 511, + 90, + 903, + 196 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Quantitative comparison on in-context learning. Here, we further analyze the impact of in-context learning on seen tasks. Tab. 1 demonstrates the impact of in-context learning on different image generation tasks. Under the canny condition, our method without in-context examples achieves an FID of 30.60, which improves to 31.15 with two in-context examples. When conditioned on depth, the RMSE decreases from 10.31 to 9.68 as the number of in-context examples increases, indicating enhanced structural consistency. 
Similarly, in the deblurring task, RMSE decreases from 26.53 to 25.57, reflecting improved fidelity to the original content. These results highlight in-context learning as an effective guidance mechanism, enabling the model to better align with the task intent.", + "bbox": [ + 511, + 210, + 906, + 421 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "6. Limitations", + "text_level": 1, + "bbox": [ + 511, + 436, + 635, + 450 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "While our model demonstrates strong stability across most in-domain tasks, it still exhibits some instability in specific tasks, such as object removal. This limitation suggests that the performance is sensitive to certain task characteristics. Additionally, the stability of the model on unseen tasks is still insufficient. Apart from the difficulty of the task and the difference with seen tasks, ambiguous in-context examples may also lead to less stable results, as discussed in Sec. 5.1.", + "bbox": [ + 511, + 460, + 905, + 582 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "7. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 603, + 633, + 619 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In this work, we propose VisualCloze, a universal image generation framework that addresses key challenges in existing methods, including generalizable instruction design, appropriate task distributions, and unified architectural design. Rather than relying solely on language-based instructions to convey task intent, we re-propose visual in-context learning, enabling the model to learn tasks from a few demonstrations. This approach improves generalization to unseen tasks and reduces task ambiguity. To overcome the sparsity of visual task distributions, which limits the learning of transferable knowledge, we construct Graph200K, a graph-structured dataset that establishes interrelated tasks. 
In this compact task space, the model is promoted to learn transferable representations and improve adaptability. Meanwhile, we identify the consistent objective between image infilling and our universal generation formulation, allowing us to seamlessly adapt general-purpose infilling models for universal generation without", + "bbox": [ + 511, + 628, + 906, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "architectural modifications. Experimental results show that our approach supports a diverse set of in-domain tasks using in-context learning while demonstrating strong generalization to unseen tasks.", + "bbox": [ + 89, + 90, + 485, + 151 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 165, + 187, + 181 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andrew Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning. In NeurIPS, 2022. 3, 4", + "[2] Michael Samuel Albergo and Eric Vanden-Eijnden. Building normalizing flows with stochastic interpolants. In ICLR, 2023. 4", + "[3] Ivana Balazevic, David Steiner, Nikhil Parthasarathy, Relja Arandjelovic, and Olivier J Henaff. Towards in-context scene understanding. In NeurIPS, 2023. 3, 4", + "[4] Amir Bar, Yossi Gandelsman, Trevor Darrell, Amir Globerson, and Alexei A Efros. Visual prompting via image inpainting. In NeurIPS, 2022. 
3, 4", + "[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. *NeurIPS*, 2020. 2, 4, 6", + "[6] John Canny. A computational approach to edge detection. IEEE TPAMI, 1986. 5", + "[7] Z. Cao, G. Hidalgo Martinez, T. Simon, S. Wei, and Y. A. Sheikh. Openpose: Realtime multi-person 2d pose estimation using part affinity fields. IEEE TPAMI, 2019. 5", + "[8] Lan Chen, Qi Mao, Yuchao Gu, and Mike Zheng Shou. Edit transfer: Learning image editing via vision in-context relations. arXiv preprint arXiv:2503.13327, 2025. 7", + "[9] Xi Chen, Zhifei Zhang, He Zhang, Yuqian Zhou, Soo Ye Kim, Qing Liu, Yijun Li, Jianming Zhang, Nanxuan Zhao, Yilin Wang, Hui Ding, Zhe Lin, and Hengshuang. Unireal: Universal image generation and editing via learning real-world dynamics. arXiv preprint arXiv:2412.07774, 2024. 4", + "[10] Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In ICML, 2018. 3", + "[11] Seunghwan Choi, Sunghyun Park, Minsoo Lee, and Jaegul Choo. Viton-hd: High-resolution virtual try-on via misalignment-aware normalization. In CVPR, 2021. 2, 5", + "[12] Zheng Chong, Xiao Dong, Haoxiang Li, shiyue Zhang, Wenqing Zhang, Hanqing Zhao, xujie zhang, Dongmei Jiang, and Xiaodan Liang. CatVTON: Concatenation is all you need for virtual try-on with diffusion models. In ICLR, 2025. 2", + "[13] Prafulla Dhariwal and Alexander Quinn Nichol. Diffusion models beat GANs on image synthesis. In NeurIPS, 2021. 4" + ], + "bbox": [ + 93, + 191, + 482, + 898 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Qingxiu Dong, Lei Li, Damai Dai, Ce Zheng, Jingyuan Ma, Rui Li, Heming Xia, Jingjing Xu, Zhiyong Wu, Tianyu Liu, Baobao Chang, Xu Sun, Lei Li, and Zhifang Sui. 
A survey on in-context learning. arXiv preprint arXiv:2301.00234, 2024. 4", + "[15] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, Dustin Podell, Tim Dockhorn, Zion English, Kyle Lacey, Alex Goodwin, Yannik Marek, and Robin Rombach. Scaling rectified flow transformers for high-resolution image synthesis. arXiv preprint arXiv:2403.03206, 2024. 2, 4", + "[16] Christopher Fifty, Ehsan Amid, Zhe Zhao, Tianhe Yu, Rohan Anil, and Chelsea Finn. Efficiently identifying task groupings for multi-task learning. In NeurIPS, 2021. 3", + "[17] Peng Gao, Le Zhuo, Dongyang Liu, Ruoyi Du, Xu Luo, Longtian Qiu, Yuhang Zhang, Chen Lin, Rongjie Huang, Shijie Geng, et al. Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers. arXiv preprint arXiv:2405.05945, 2024. 4", + "[18] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Mdtv2: Masked diffusion transformer is a strong image synthesizer. arXiv preprint arXiv:2303.14389, 2024. 4", + "[19] Golnaz Ghiasi, Barret Zoph, Ekin D. Cubuk, Quoc V. Le, and Tsung-Yi Lin. Multi-task self-training for learning general representations. In ICCV, 2021. 3", + "[20] Geonmo Gu, Byungsoo Ko, SeoungHyun Go, Sung-Hyun Lee, Jingeun Lee, and Minchul Shin. Towards light-weight and real-time line segment detection. In AAAI, 2022. 5", + "[21] Aaron Hertzmann. Algorithms for rendering in artistic styles. PhD thesis, New York University, Graduate School of Arts and Science, 2001. 4", + "[22] Aaron Hertzmann, Charles E. Jacobs, Nuria Oliver, Brian Curless, and David H. Salesin. Image analogies. In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, 2001. 4", + "[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. 
Advances in neural information processing systems, 30, 2017. 10, 16", + "[24] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In NeurIPS, 2020. 4", + "[25] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In ICLR, 2022. 7", + "[26] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Huanzhang Dou, Yupeng Shi, Yutong Feng, Chen Liang, Yu Liu, and Jingren Zhou. Group diffusion transformers are unsupervised multitask learners. arXiv preprint arxiv:2410.15027, 2024. 4", + "[27] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Huanzhang Dou, Chen Liang, Yutong Feng, Yu Liu, and Jingren Zhou. In-context lora for diffusion transformers. arXiv preprint arxiv:2410.23775, 2024. 2, 3" + ], + "bbox": [ + 516, + 92, + 903, + 898 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Shijie Huang, Yiren Song, Yuxuan Zhang, Hailong Guo, Xueyin Wang, Mike Zheng Shou, and Jiaming Liu. Photodoodle: Learning artistic image editing from few-shot pairwise data. arXiv preprint arXiv:2502.14397, 2025. 5", + "[29] Zehuan Huang, Yuanchen Guo, Haoran Wang, Ran Yi, Lizhuang Ma, Yan-Pei Cao, and Lu Sheng. Mv-adapter: Multi-view consistent image generation made easy. arXiv preprint arXiv:2412.03632, 2024. 5", + "[30] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5148-5157, 2021. 10, 16", + "[31] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In CVPR, 2018. 3", + "[32] Iasonas Kokkinos. 
Ubernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In CVPR, 2017. 3",
 + "[33] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024. 2, 3, 4, 5, 7, 10, 11, 16",
 + "[34] Bolin Lai, Felix Juefei-Xu, Miao Liu, Xiaoliang Dai, Nikhil Mehta, Chenguang Zhu, Zeyi Huang, James M Rehg, Sang-min Lee, Ning Zhang, et al. Unleashing in-context learning of autoregressive models for few-shot image manipulation. arXiv preprint arXiv:2412.01027, 2024. 4",
 + "[35] Duong H. Le, Tuan Pham, Sangho Lee, Christopher Clark, Aniruddha Kembhavi, Stephan Mandt, Ranjay Krishna, and Jiasen Lu. One diffusion to generate them all, 2024. 2, 3, 4, 10",
 + "[36] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pretrained subject representation for controllable text-to-image generation and editing. In NeurIPS, 2023. 16",
 + "[37] Kunchang Li, Yali Wang, Junhao Zhang, Peng Gao, Guanglu Song, Yu Liu, Hongsheng Li, and Yu Qiao. Uniformer: Unifying convolution and self-attention for visual recognition. IEEE TPAMI, 2023. 5",
 + "[38] Zhen Li, Mingdeng Cao, Xintao Wang, Zhongang Qi, MingMing Cheng, and Ying Shan. Photomaker: Customizing realistic human photos via stacked id embedding. In CVPR, 2024. 2, 4, 6",
 + "[39] Weifeng Lin, Xinyu Wei, Renrui Zhang, Le Zhuo, Shitian Zhao, Siyuan Huang, Junlin Xie, Yu Qiao, Peng Gao, and Hongsheng Li. Pixwizard: Versatile image-to-image visual assistant with open-language instructions. arXiv preprint arXiv:2409.15278, 2024. 2, 5, 6",
 + "[40] Yaron Lipman, Ricky T. Q. Chen, Heli Ben-Hamu, Maximilian Nickel, and Matthew Le. Flow matching for generative modeling. In ICLR, 2023. 4",
 + "[41] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining. arXiv preprint arXiv:2408.02657, 2024. 
4", + "[42] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 4" + ], + "bbox": [ + 91, + 90, + 480, + 900 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[43] Yihao Liu, Xiangyu Chen, Xianzheng Ma, Xintao Wang, Jiantao Zhou, Yu Qiao, and Chao Dong. Unifying image processing as visual prompting question answering. arXiv preprint arXiv:2310.10513, 2023. 3, 4, 6", + "[44] Chaojie Mao, Jingfeng Zhang, Yulin Pan, Zeyinzi Jiang, Zhen Han, Yu Liu, and Jingren Zhou. Ace++: Instruction-based image creation and editing via context-aware content filling. arXiv preprint arXiv:2501.02487, 2025. 2, 4", + "[45] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. Sdedit: Guided image synthesis and editing with stochastic differential equations. arXiv preprint arXiv:2108.01073, 2021. 7", + "[46] Noor Nashid, Mifta Sintaha, and Ali Mesbah. Retrieval-based prompt selection for code-related few-shot learning. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE), pages 2450-2462. IEEE, 2023. 8", + "[47] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 11, 16", + "[48] William Peebles and Saining Xie. Scalable diffusion models with transformers. In ICCV, 2023. 4, 16", + "[49] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 
10, 11, 16", + "[50] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, ChaoYuan Wu, Ross Girshick, Piotr Dólar, and Christoph Feichtenhofer. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. 5", + "[51] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2022. 4", + "[52] Ohad Rubin, Jonathan Herzig, and Jonathan Berant. Learning to retrieve prompts for in-context learning. arXiv preprint arXiv:2112.08633, 2021. 8", + "[53] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 3", + "[54] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023. 2, 16", + "[55] Dianmo Sheng, Dongdong Chen, Zhentao Tan, Qiankun Liu, Qi Chu, Jianmin Bao, Tao Gong, Bin Liu, Shengwei Xu, and Nenghai Yu. Towards more unified in-context visual understanding. In CVPR, 2024. 4", + "[56] Kihyuk Sohn, Nataniel Ruiz, Kimin Lee, Daniel Castro Chin, Irina Blok, Huiwen Chang, Jarred Barber, Lu Jiang, Glenn Entis, Yuanzhen Li, et al. Styledrop: Text-to-image" + ], + "bbox": [ + 514, + 90, + 903, + 900 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "generation in any style. arXiv preprint arXiv:2306.00983, 2023.16", + "[57] Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. 
arXiv preprint arXiv:2104.09864, 2021. 7",
 + "[58] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 4",
 + "[59] Yanpeng Sun, Qiang Chen, Jian Wang, Jingdong Wang, and Zechao Li. Exploring effective factors for improving visual in-context learning. arXiv preprint arXiv:2304.04748, 2023. 4",
 + "[60] Yasheng SUN, Yifan Yang, Houwen Peng, Yifei Shen, Yuqing Yang, Han Hu, Lili Qiu, and Hideki Koike. Imagebrush: Learning visual in-context instructions for exemplar-based image manipulation. In NeurIPS, 2023. 3, 4, 6",
 + "[61] Zhenxiong Tan, Songhua Liu, Xingyi Yang, Qiaochu Xue, and Xinchao Wang. Ominicontrol: Minimal and universal control for diffusion transformer. arXiv preprint arXiv:2411.15098, 3, 2024. 4, 5, 6, 10, 11",
 + "[62] Paints-Undo Team. Paints-undo github page, 2024. 5",
 + "[63] Alex Jinpeng Wang, Linjie Li, Yiqi Lin, Min Li, Lijuan Wang, and Mike Zheng Shou. Leveraging visual tokens for extended text contexts in multi-modal learning. NeurIPS, 2024. 4",
 + "[64] Haofan Wang, Peng Xing, Renyuan Huang, Hao Ai, Qixun Wang, and Xu Bai. Instantstyle-plus: Style transfer with content-preserving in text-to-image generation. arXiv preprint arXiv:2407.00788, 2024. 2, 4, 5, 10",
 + "[65] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 5",
 + "[66] Xinlong Wang, Wen Wang, Yue Cao, Chunhua Shen, and Tiejun Huang. Images speak in images: A generalist painter for in-context visual learning. In CVPR, 2023. 3, 4",
 + "[67] Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, and Tiejun Huang. 
Seggpt: Towards segmenting everything in context. In ICCV, 2023. 3, 4", + "[68] Zhendong Wang, Yifan Jiang, Yadong Lu, yelong shen, Pengcheng He, Weizhu Chen, Zhangyang Wang, and Mingyuan Zhou. In-context learning unlocked for diffusion models. In NeurIPS, 2023. 4", + "[69] Cong Wei, Zheyang Xiong, Weiming Ren, Xinrun Du, Ge Zhang, and Wenhu Chen. Omniedit: Building image editing generalist models through specialist supervision. arXiv preprint arXiv:2411.07199, 2024. 2, 3, 5, 6", + "[70] Shaojin Wu, Mengqi Huang, Wenxu Wu, Yufeng Cheng, Fei Ding, and Qian He. Less-to-more generalization: Unlocking more controllability by in-context generation. arXiv preprint arXiv:2504.02160, 2025. 8", + "[71] Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and" + ], + "bbox": [ + 91, + 92, + 482, + 901 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zheng Liu. Omnigen: Unified image generation. arXiv preprint arXiv:2409.11340, 2024. 2, 3, 4, 10, 11", + "[72] Saining Xie and Zhuowen Tu. Holistically-nested edge detection. In CVPR, 2015. 5", + "[73] Haofei Xu, Jing Zhang, Jianfei Cai, Hamid Rezatofighi, Fisher Yu, Dacheng Tao, and Andreas Geiger. Unifying flow, stereo and depth estimation. IEEE TPAMI, 2023. 5", + "[74] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv:2406.09414, 2024. 5", + "[75] Sidi Yang, Tianhe Wu, Shuwei Shi, Shanshan Lao, Yuan Gong, Mingdeng Cao, Jiahao Wang, and Yujiu Yang. Maniaq: Multi-dimension attention network for no-reference image quality assessment. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1191-1200, 2022. 10, 16", + "[76] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 
3",
 + "[77] Fanghua Yu, Jinjin Gu, Zheyuan Li, Jinfan Hu, Xiangtao Kong, Xintao Wang, Jingwen He, Yu Qiao, and Chao Dong. Scaling up to excellence: Practicing model scaling for photo-realistic image restoration in the wild. arXiv preprint arXiv:2401.13627, 2024. 3",
 + "[78] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Ben Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. Transactions on Machine Learning Research, 2022. 4",
 + "[79] Hayoung Yun and Hanjoo Cho. Achievement-based training progress balancing for multi-task learning. In ICCV, 2023. 3",
 + "[80] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In ICCV, 2023. 3, 4, 5, 10",
 + "[81] Yuxin Zhang, Nisha Huang, Fan Tang, Haibin Huang, Chongyang Ma, Weiming Dong, and Changsheng Xu. Inversion-based style transfer with diffusion models. In CVPR, 2023. 2, 3, 11",
 + "[82] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. What makes good examples for visual in-context learning? In NeurIPS, 2023. 3, 4",
 + "[83] Canyu Zhao, Mingyu Liu, Huanyi Zheng, Muzhi Zhu, Zhiyue Zhao, Hao Chen, Tong He, and Chunhua Shen. Diception: A generalist diffusion model for visual perceptual tasks. arXiv preprint arXiv:2502.17157, 2025. 4",
 + "[84] Peng Zheng, Dehong Gao, Deng-Ping Fan, Li Liu, Jorma Laaksonen, Wanli Ouyang, and Nicu Sebe. Bilateral reference for high-resolution dichotomous image segmentation. CAAI Artificial Intelligence Research, 2024. 5",
 + "[85] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In CVPR, 2017. 3",
 + "[86] Yucheng Zhou, Xiang Li, Qianning Wang, and Jianbing Shen. Visual in-context learning for large vision-language models. arXiv preprint arXiv:2402.11574, 2024. 
4" + ], + "bbox": [ + 516, + 92, + 903, + 900 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[87] Muzhi Zhu, Yang Liu, Zekai Luo, Chenchen Jing, Hao Chen, Guangkai Xu, Xinlong Wang, and Chunhua Shen. Unleashing the potential of the diffusion model in few-shot semantic segmentation. In NeurIPS, 2024. 3, 4", + "[88] Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Xiangyang Zhu, Fu-Yun Wang, Zhanyu Ma, Xu Luo, Zehan Wang, Kaipeng Zhang, Lirui Zhao, Si Liu, Xiangyu Yue, Wanli Ouyang, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina next: Making lumina-t2x stronger and faster with next-dit. In NeurIPS, 2024. 2, 3, 4" + ], + "bbox": [ + 91, + 92, + 480, + 244 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 924, + 506, + 935 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Appendix A. Instruction Format", + "text_level": 1, + "bbox": [ + 91, + 90, + 366, + 107 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In our unified framework, the instruction consists of three parts: (1) layout instruction, which describes the layout of the grid image; (2) task instruction, which specifies the task type; and (3) content instruction, which describes the content of the target image. Fig. 13 illustrates the instructions for concept fusion of style, subject, and layout (Fig. 13 upper) and image editing with reference (Fig. 13 bottom). The content instruction is omitted for some tasks that provide strong visual cues in conditions, like style transfer.", + "bbox": [ + 89, + 114, + 483, + 252 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Appendix B. 
Fine-tuning FLUX.1-dev Model", + "text_level": 1, + "bbox": [ + 89, + 267, + 470, + 285 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Apart from FLUX.1-Fill-dev, we also adapt our method to FLUX.1-dev [33], a common text-to-image generative model. Unlike the infilling model that shares a consistent objective with universal image generation, FLUX.1-dev requires customized modifications to process clean condition images and noise target images. Specifically, after concatenating images in a grid layout like the infilling model, we always keep the region corresponding to the conditions as clean latent embeddings throughout the sampling process. This strategy requires modifications in image sampling because FLUX.1-Fill-dev takes noise latent embeddings as input. Moreover, for the adaLN-Zero block [48], it is critical to calculate the separate mean and shift parameters for the regions of clean conditions and noise target by feeding $T = 0$ and $T = t$ into the adaLN-Zero, respectively. $t$ indicates the timestep in each sampling step and gradually increases from 0 to 1 along the sampling process. This strategy aligns with the pre-training domain of FLUX.1-dev, where different noise levels correspond to different mean and shift. As shown in Fig. 14, this strategy ensures the visual fidelity.", + "bbox": [ + 89, + 292, + 483, + 609 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Appendix C. Evaluation Metrics", + "text_level": 1, + "bbox": [ + 89, + 627, + 366, + 643 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1. 
Conditioning Generation",
 + "text_level": 1,
 + "bbox": [
 + 89,
 + 652,
 + 323,
 + 667
 + ],
 + "page_idx": 15
 + },
 + {
 + "type": "text",
 + "text": "We assess the models from controllability, quality, and text consistency to evaluate image generation quality in conditioning generation and image restoration tasks.",
 + "bbox": [
 + 89,
 + 675,
 + 482,
 + 720
 + ],
 + "page_idx": 15
 + },
 + {
 + "type": "text",
 + "text": "Controllability. For conditional image generation, we measure the difference between the input conditions and those extracted from generated images. Specifically, we calculate the F1 Score for the canny-to-image task and RMSE for the depth-to-image task. Additionally, for deblurring, we measure the RMSE between original and restored images.",
 + "bbox": [
 + 89,
 + 734,
 + 482,
 + 825
 + ],
 + "page_idx": 15
 + },
 + {
 + "type": "text",
 + "text": "Generation quality. We measure the generation quality using FID [23], SSIM, MAN-IQA [75], and MUSIQ [30]. FID [23] measures the similarity between generated and real image feature distributions. SSIM evalu",
 + "bbox": [
 + 89,
 + 840,
 + 483,
 + 902
 + ],
 + "page_idx": 15
 + },
 + {
 + "type": "text",
 + "text": "ates perceptual quality by comparing luminance, contrast, and structural patterns between images. It calculates local patch statistics and combines them into a composite score ranging from $-1$ to 1, with higher values indicating better structural preservation. MANIQA [75] and MUSIQ [30] leverage neural networks to predict image quality scores.",
 + "bbox": [
 + 511,
 + 90,
 + 906,
 + 183
 + ],
 + "page_idx": 15
 + },
 + {
 + "type": "text",
 + "text": "Text consistency. Leveraging the powerful multi-modal capability of CLIP [49], we also measure the semantic alignment between generated images and text prompts, which reflects how the model follows instructions.",
 + "bbox": [
 + 511,
 + 200,
 + 906,
 + 260
 + ],
 + "page_idx": 15
 + },
 + {
 + "type": "text",
 + "text": "C.2. 
Subject Driven Generation", + "text_level": 1, + "bbox": [ + 513, + 268, + 761, + 286 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Following DreamBooth [54] and BLIP-Diffusion [36], we measure DINOv2 [47], CLIP-I [49], and CLIP-T scores for the comparison of subject-driven image generation. DINOv2 [47] and CLIP-I scores measure the alignment between the reference subject and generated images through cosine similarity and CLIP score, respectively. CLIP-T measures the alignment between the generated image and the corresponding text prompt.", + "bbox": [ + 511, + 291, + 906, + 414 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.3. Style Transfer", + "text_level": 1, + "bbox": [ + 513, + 422, + 663, + 436 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Following StyleDrop [56], we assess the performance of style transfer according to text consistency and style alignment. For text alignment, we measure the cosine similarity between embeddings of generated images and text prompts, where the embeddings are extracted by CLIP [49]. Regarding style consistency, we measure the cosine similarity between embeddings of generated images and style reference. 
Note that these two metrics should be considered together because the style consistency will reach 1.0 if the model collapses, where the model completely copies style reference as a composite image and ignores text instructions.", + "bbox": [ + 511, + 444, + 908, + 611 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/ecbf871ff2a97d743b601e08351ab7eb90c6ec2fea0b8095ee48c514aff5062d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 87, + 633, + 409 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Layout instruction:", + "text_level": 1, + "bbox": [ + 656, + 95, + 785, + 109 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "12 images are organized into a grid of 3 rows and 4 columns, evenly spaced.", + "bbox": [ + 656, + 122, + 849, + 164 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Task instruction:", + "text_level": 1, + "bbox": [ + 656, + 181, + 769, + 194 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Each row describes a process that begins with [IMAGE1] white edge lines on black from canny detection, [IMAGE2] Photo with a strong artistic theme, [IMAGE3] a reference image showcasing the dominant object and results in [IMAGE4] High-quality visual with distinct artistic touch.", + "bbox": [ + 656, + 205, + 869, + 330 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Content instruction:", + "text_level": 1, + "bbox": [ + 656, + 338, + 790, + 351 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "0", + "bbox": [ + 660, + 361, + 676, + 378 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/57e0baf0528b693e26676352db38fc6d3da52d3b54bc71b5459c16e30fbeb04e.jpg", + "image_caption": [ + "(a) Concatenated images", + "Figure 13. 
Examples of language instructions that contain prompts about the layout of the concatenated image, task intent, and content of the target image." + ], + "image_footnote": [], + "bbox": [ + 119, + 417, + 633, + 816 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Layout instruction:", + "text_level": 1, + "bbox": [ + 656, + 441, + 785, + 455 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A 3x3 grid containing 9 images, aligned in a clean and structured layout", + "bbox": [ + 656, + 470, + 859, + 512 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Task instruction:", + "text_level": 1, + "bbox": [ + 656, + 532, + 769, + 545 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Every row provides a step-by-step guide to evolve [IMAGE1] a reference image with the main subject included, [IMAGE2] an image with flawless clarity into [IMAGE3] a high-quality image.", + "bbox": [ + 656, + 558, + 869, + 643 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Content instruction:", + "text_level": 1, + "bbox": [ + 656, + 659, + 790, + 671 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The bottom-right corner image presents: A glossy gel nail polish bottle. 
At the edge of a bustling city park, this item rests on vibrant green grass, captured with a subtle bokeh effect as joggers and pets move in the background.", + "bbox": [ + 656, + 686, + 869, + 785 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "(b) Language instructions", + "text_level": 1, + "bbox": [ + 681, + 818, + 851, + 832 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ec06ee6695d88b0d46b5f25e4cca7be24315691f1a9ddf2b5a2a9ac4308e52f4.jpg", + "image_caption": [ + "Condition" + ], + "image_footnote": [], + "bbox": [ + 93, + 388, + 294, + 486 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/0e85569193972fbb96aa49172e0269e931543b886ef36bfc2dece96a9861cc93.jpg", + "image_caption": [ + "Target" + ], + "image_footnote": [], + "bbox": [ + 295, + 388, + 495, + 486 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/8f28dcad99cf5e02fe353236076009c397e7bdd7f66c8b64768a20abe40ff522.jpg", + "image_caption": [ + "Condition" + ], + "image_footnote": [], + "bbox": [ + 503, + 388, + 702, + 486 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/fec04428066c5f332887357520d323ebdf63d8418f8810016904644ec989f7e1.jpg", + "image_caption": [ + "Target" + ], + "image_footnote": [], + "bbox": [ + 702, + 388, + 903, + 486 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/730cf23dcb3263363f4822c37e1a3b3bc81798efc1e9fd59a950a41fc35132f8.jpg", + "image_caption": [ + "(a) separate mean and shift" + ], + "image_footnote": [], + "bbox": [ + 94, + 494, + 287, + 580 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/d354e8d0d32131643b1cced4782295a381b1ae6609006c1e868af1fd71828515.jpg", + "image_caption": [ + "Figure 14. Effects of separate mean and shift in fine-tuning FLUX.1-dev." 
+ ], + "image_footnote": [], + "bbox": [ + 292, + 494, + 495, + 580 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/b956840f0c8e329182c2355da90915a90cc86e1132b6cc30aead6bf424475919.jpg", + "image_caption": [ + "(b) unified mean and shift" + ], + "image_footnote": [], + "bbox": [ + 503, + 494, + 700, + 580 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/8748534db01d157266d73470cc0bf47cd9889217f45f4307fea447a43120e8f8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 702, + 494, + 903, + 580 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 924, + 506, + 935 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_model.json b/data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6e9ede4f6742236c4808ef6ad779cd0c6b67a350 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_model.json @@ -0,0 +1,5186 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.277, + 0.058, + 0.725 + ], + "angle": 270, + "content": "arXiv:2504.07960v3 [cs.CV] 14 Dec 2025" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.131, + 0.784, + 0.177 + ], + "angle": 0, + "content": "VisualCloze: A Universal Image Generation Framework via Visual In-Context Learning" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.203, + 0.804, + 0.222 + ], + "angle": 0, + "content": "Zhong-Yu Li\\(^{1,4*}\\) Ruoyi Du\\(^{2,4*}\\) Juncheng Yan\\(^{3,4}\\) Le Zhuo\\(^{4}\\) Qilong Wu\\(^{4}\\)" + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.222, + 0.746, + 0.24 + ], + "angle": 0, + "content": "Zhen Li\\(^{5\\dagger}\\) Peng Gao\\(^{4}\\) Zhanyu Ma\\(^{2}\\) Ming-Ming Cheng\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.239, + 0.84, + 0.257 + ], + "angle": 0, + 
"content": "\\(^{1}\\)VCIP, CS, Nankai University \\(^{2}\\)Beijing University of Posts and Telecommunications" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.257, + 0.862, + 0.276 + ], + "angle": 0, + "content": "\\(^{3}\\)Tsinghua University \\(^{4}\\)Shanghai AI Laboratory \\(^{5}\\)The Chinese University of Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.276, + 0.673, + 0.294 + ], + "angle": 0, + "content": "Project page: https://visualcloze.github.io" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.306, + 0.186, + 0.319 + ], + "angle": 0, + "content": "Understand" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.32, + 0.174, + 0.328 + ], + "angle": 0, + "content": "the task" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.306, + 0.243, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.173, + 0.32, + 0.242, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.224, + 0.308, + 0.359, + 0.32 + ], + "angle": 0, + "content": "In-context examples" + }, + { + "type": "image", + "bbox": [ + 0.247, + 0.32, + 0.293, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.32, + 0.393, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.357, + 0.214, + 0.366 + ], + "angle": 0, + "content": "Style +" + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.366, + 0.214, + 0.376 + ], + "angle": 0, + "content": "Subject =" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.365, + 0.244, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.244, + 0.365, + 0.335, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.365, + 0.364, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.365, + 0.365, + 0.417, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + 
"bbox": [ + 0.42, + 0.311, + 0.536, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.537, + 0.313, + 0.647, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.657, + 0.312, + 0.771, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.773, + 0.313, + 0.885, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.444, + 0.387, + 0.542, + 0.396 + ], + "angle": 0, + "content": "Dense Prediction" + }, + { + "type": "text", + "bbox": [ + 0.678, + 0.387, + 0.772, + 0.396 + ], + "angle": 0, + "content": "Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.441, + 0.189, + 0.451 + ], + "angle": 0, + "content": "Fill blank grid" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.453, + 0.19, + 0.463 + ], + "angle": 0, + "content": "by reasoning" + }, + { + "type": "title", + "bbox": [ + 0.195, + 0.411, + 0.317, + 0.423 + ], + "angle": 0, + "content": "Visual Prompt" + }, + { + "type": "image", + "bbox": [ + 0.195, + 0.423, + 0.296, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.423, + 0.346, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.347, + 0.411, + 0.392, + 0.422 + ], + "angle": 0, + "content": "Target" + }, + { + "type": "image", + "bbox": [ + 0.347, + 0.423, + 0.371, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.433, + 0.384, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.472, + 0.393, + 0.484 + ], + "angle": 0, + "content": "Extend to diverse tasks" + }, + { + "type": "image", + "bbox": [ + 0.421, + 0.405, + 0.535, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.435, + 0.479, + 0.542, + 0.489 + ], + "angle": 0, + "content": "Image Restoration" + }, + { + "type": "image", + "bbox": 
[ + 0.536, + 0.405, + 0.647, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.658, + 0.405, + 0.771, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.773, + 0.405, + 0.885, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.13, + 0.5, + 0.197, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.142, + 0.567, + 0.279, + 0.578 + ], + "angle": 0, + "content": "Subject + Layout + Style" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.5, + 0.331, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.566, + 0.279, + 0.578 + ], + "angle": 0, + "content": "Out + Style" + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.5, + 0.434, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.435, + 0.5, + 0.542, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.57, + 0.499, + 0.672, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.594, + 0.567, + 0.681, + 0.578 + ], + "angle": 0, + "content": "Subject-driven" + }, + { + "type": "image", + "bbox": [ + 0.673, + 0.5, + 0.78, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.677, + 0.566, + 0.771, + 0.578 + ], + "angle": 0, + "content": "#" + }, + { + "type": "image", + "bbox": [ + 0.781, + 0.5, + 0.887, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.781, + 0.566, + 0.885, + 0.578 + ], + "angle": 0, + "content": "\\( \\therefore m : x = 1 \\) 或 \\( {3x} + {4y} + 1 = 0 \\)" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.587, + 0.189, + 0.661 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.19, + 0.586, + 0.266, + 0.662 + ], + "angle": 0, + "content": null + }, + { + "type": 
"image", + "bbox": [ + 0.278, + 0.586, + 0.347, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.29, + 0.648, + 0.35, + 0.659 + ], + "angle": 0, + "content": "Relighting" + }, + { + "type": "image", + "bbox": [ + 0.35, + 0.586, + 0.417, + 0.648 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.43, + 0.586, + 0.506, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.432, + 0.648, + 0.505, + 0.659 + ], + "angle": 0, + "content": "Virtual Try-On" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.586, + 0.574, + 0.649 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.575, + 0.586, + 0.655, + 0.649 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.586, + 0.736, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.691, + 0.648, + 0.772, + 0.658 + ], + "angle": 0, + "content": "Style Transfer" + }, + { + "type": "image", + "bbox": [ + 0.738, + 0.586, + 0.812, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.74, + 0.648, + 0.8, + 0.657 + ], + "angle": 0, + "content": "ansfer" + }, + { + "type": "image", + "bbox": [ + 0.814, + 0.586, + 0.885, + 0.661 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.668, + 0.259, + 0.718 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.719, + 0.204, + 0.73 + ], + "angle": 0, + "content": "Editing (Add)" + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.668, + 0.396, + 0.732 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.668, + 0.518, + 0.732 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.669, + 0.639, + 0.732 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + 
"bbox": [ + 0.64, + 0.669, + 0.721, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.653, + 0.72, + 0.783, + 0.731 + ], + "angle": 0, + "content": "Subject-driven Editing" + }, + { + "type": "image", + "bbox": [ + 0.722, + 0.669, + 0.802, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.722, + 0.72, + 0.803, + 0.731 + ], + "angle": 0, + "content": "even Editing" + }, + { + "type": "image", + "bbox": [ + 0.804, + 0.669, + 0.885, + 0.732 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.739, + 0.208, + 0.811 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.742, + 0.296, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.303, + 0.742, + 0.389, + 0.805 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.742, + 0.488, + 0.811 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.496, + 0.739, + 0.592, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.78, + 0.567, + 0.8 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.516, + 0.8, + 0.583, + 0.809 + ], + "angle": 0, + "content": "Multi-View" + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.742, + 0.682, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.74, + 0.772, + 0.798 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.704, + 0.798, + 0.772, + 0.808 + ], + "angle": 0, + "content": "Multi-View" + }, + { + "type": "image", + "bbox": [ + 0.816, + 0.742, + 0.884, + 0.802 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.82, + 0.907, + 0.863 + ], + "angle": 0, + "content": "Figure 1. 
The top left illustrates our universal image generation framework based on visual in-context learning. Given one query of a specific task, the generative model learns the task by observing a few in-context examples presented as demonstrations. For each task, the generation result is indicated by a red box." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.135, + 0.107, + 0.294, + 0.118 + ], + "angle": 0, + "content": "Without In-context Example" + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.12, + 0.178, + 0.163 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.109, + 0.164, + 0.176, + 0.173 + ], + "angle": 0, + "content": "Visual Prompt" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.12, + 0.252, + 0.163 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.183, + 0.164, + 0.251, + 0.173 + ], + "angle": 0, + "content": "Visual Prompt" + }, + { + "type": "image", + "bbox": [ + 0.256, + 0.12, + 0.326, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.257, + 0.164, + 0.293, + 0.173 + ], + "angle": 0, + "content": "Target" + }, + { + "type": "image_caption", + "bbox": [ + 0.14, + 0.185, + 0.289, + 0.196 + ], + "angle": 0, + "content": "+One In-context Example" + }, + { + "type": "image", + "bbox": [ + 0.108, + 0.197, + 0.177, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.197, + 0.251, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.255, + 0.197, + 0.325, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.253, + 0.178, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.109, + 0.297, + 0.176, + 0.306 + ], + "angle": 0, + "content": 
"Visual Prompt" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.253, + 0.251, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.183, + 0.297, + 0.251, + 0.306 + ], + "angle": 0, + "content": "Visual Prompt" + }, + { + "type": "image", + "bbox": [ + 0.256, + 0.253, + 0.326, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.297, + 0.293, + 0.305 + ], + "angle": 0, + "content": "Target" + }, + { + "type": "image_caption", + "bbox": [ + 0.371, + 0.118, + 0.528, + 0.13 + ], + "angle": 0, + "content": "+ Two In-context Examples" + }, + { + "type": "image", + "bbox": [ + 0.344, + 0.141, + 0.412, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.417, + 0.141, + 0.484, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.141, + 0.559, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.197, + 0.412, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.417, + 0.197, + 0.485, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.197, + 0.559, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.253, + 0.412, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.417, + 0.253, + 0.485, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.253, + 0.559, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.343, + 0.297, + 0.412, + 0.306 + ], + "angle": 0, + "content": "Visual Prompt" + }, + { + "type": "image_caption", + "bbox": [ + 0.417, + 0.297, + 0.485, + 0.306 + ], + "angle": 0, + "content": "Visual Prompt" + }, + { + "type": "image_caption", + "bbox": [ + 0.49, + 0.297, + 0.559, + 0.306 + ], + 
"angle": 0, + "content": "Target" + }, + { + "type": "image_caption", + "bbox": [ + 0.578, + 0.107, + 0.737, + 0.118 + ], + "angle": 0, + "content": "Without In-context Example" + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.119, + 0.657, + 0.163 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.588, + 0.164, + 0.655, + 0.173 + ], + "angle": 0, + "content": "Visual Prompt" + }, + { + "type": "image", + "bbox": [ + 0.659, + 0.119, + 0.73, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.662, + 0.164, + 0.696, + 0.173 + ], + "angle": 0, + "content": "Target" + }, + { + "type": "image_caption", + "bbox": [ + 0.744, + 0.118, + 0.895, + 0.13 + ], + "angle": 0, + "content": "+ Two In-context Example" + }, + { + "type": "image_caption", + "bbox": [ + 0.58, + 0.185, + 0.731, + 0.196 + ], + "angle": 0, + "content": "+ One In-context Example" + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.197, + 0.656, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.661, + 0.197, + 0.728, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.748, + 0.141, + 0.816, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.822, + 0.141, + 0.89, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.252, + 0.656, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.297, + 0.653, + 0.306 + ], + "angle": 0, + "content": "Visual Prompt" + }, + { + "type": "image", + "bbox": [ + 0.661, + 0.252, + 0.728, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.665, + 0.297, + 0.696, + 0.306 + ], + "angle": 0, + "content": "Target" + }, + { + "type": "image", + "bbox": [ + 0.749, + 0.196, + 0.817, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": 
"image", + "bbox": [ + 0.822, + 0.197, + 0.89, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.748, + 0.252, + 0.819, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.823, + 0.252, + 0.892, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.578, + 0.311, + 0.898, + 0.358 + ], + "angle": 0, + "content": "Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: making [IMAGE1] the standing woman [IMAGE2] sit down and give the thumbs up." + }, + { + "type": "image_caption", + "bbox": [ + 0.091, + 0.339, + 0.905, + 0.386 + ], + "angle": 0, + "content": "face, to generate [IMAGE3] that faces the center of the lens. The last row is: making [IMAGE1] the standing woman the final row is: the woman's frontal face that faces the center of the lens. [IMAGE2] sit down and give the thumbs up. Figure 2. Unseen Tasks : Generalizing to tasks unseen during training via in-context learning. More in-context examples lead to more accurate results." + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.4, + 0.326, + 0.415 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.433, + 0.484, + 0.811 + ], + "angle": 0, + "content": "Recent progress in diffusion models significantly advances various image generation tasks. However, the current mainstream approach remains focused on building task-specific models, which have limited efficiency when supporting a wide range of different needs. While universal models attempt to address this limitation, they face critical challenges, including generalizable task instruction, appropriate task distributions, and unified architectural design. 
To tackle these challenges, we propose VisualCloze, a universal image generation framework, which supports a wide range of in-domain tasks, generalization to unseen ones, unseen unification of multiple tasks, and reverse generation. Unlike existing methods that rely on language-based task instruction, leading to task ambiguity and weak generalization, we integrate visual in-context learning, allowing models to identify tasks from visual demonstrations. Meanwhile, the inherent sparsity of visual task distributions hampers the learning of transferable knowledge across tasks. To this end, we introduce Graph200K, a graph-structured dataset that establishes various interrelated tasks, enhancing task density and transferable knowledge. Furthermore, we uncover that our unified image generation formulation shared a consistent objective with image infilling, enabling us to leverage the strong generative priors of pre-trained infilling models without modifying the architectures." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.818, + 0.222, + 0.833 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.844, + 0.484, + 0.874 + ], + "angle": 0, + "content": "Recent advancements in image generation, propelled by the progress of diffusion models [15, 33, 88], have led to a" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.402, + 0.907, + 0.583 + ], + "angle": 0, + "content": "wide range of applications, including image editing [69], style transfer [64, 81], virtual try-on [11, 12], and personalized generation [38, 54], among others. However, these tasks typically require task-specific models, which limit efficiency and scalability for real-world applications. In recent years, there has been growing interest in universal generative models [27, 39, 44], aiming to handle diverse image generation tasks, even unseen ones, within a single unified framework. 
Despite significant progress, some critical issues remain to be addressed, such as (1) distinguishable and generalizable task instruction, (2) comprehensive task coverage during training, and (3) a unified model architecture." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.584, + 0.909, + 0.903 + ], + "angle": 0, + "content": "An ideal task instruction is crucial for guiding the model to process the desired task effectively. Existing methods primarily rely on language instructions [27, 44] or task-specific tokens [39] to distinguish the task to be performed. However, the complexity of visual tasks and the inherent gap between vision and language modalities make it hard for the model to understand language-only task descriptions, which leads to task confusion [39] and hinders generalization on unseen tasks [35, 71]. Moreover, pre-learned task-specific tokens constrain the model only to handle seen tasks. In contrast, large language models (LLMs) have successfully achieved unified multi-task modeling, partially due to the rise of in-context learning [5], which allows models to adapt various tasks using only a few demonstrations. We aim to replicate the concept of in-context learning in the pure visual modality, where the model learns the desired task directly from a few visual examples as task demonstrations, as shown in Fig. 1 (Left Top). In this setting, in-context learning shows strong potential for universal image generation. We summarize four key findings: (1) it supports various in-domain tasks with reduced task ambiguity (Fig. 
1);" + }, + { + "type": "page_footnote", + "bbox": [ + 0.115, + 0.888, + 0.228, + 0.9 + ], + "angle": 0, + "content": "* Equal contribution" + }, + { + "type": "page_footnote", + "bbox": [ + 0.247, + 0.888, + 0.368, + 0.9 + ], + "angle": 0, + "content": "Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.1, + 0.092, + 0.426, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.443, + 0.094, + 0.905, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.299, + 0.908, + 0.342 + ], + "angle": 0, + "content": "Figure 3. Unseen Tasks: Leveraging in-context learning to unify multiple seen tasks into a single-step unseen task. Left: Unifying the [Depth to Image] and [Relighting] task into a single [Depth to Images with Various Lighting] task. Right: Unifying multiple dense prediction tasks into a joint prediction task. Results without visual context can be found in the appendix." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.354, + 0.483, + 0.506 + ], + "angle": 0, + "content": "(2) it generalizes to unseen tasks (Fig. 2, Fig. 8); (3) as an unseen strategy for task unification, it can integrate multiple sub-tasks into a single step and generate intermediate results (Fig. 3); (4) it enables reverse generation, i.e., inferring a set of conditions from a given target (Fig. 9). While prior works [1, 3, 4, 43, 66, 71, 82] have also explored in-context learning in vision, they are largely constrained to specific domains (such as dense prediction or style transfer [67, 87]), or simplified generation settings involving only one condition and one target image [43, 60]." 
+ }, + { + "type": "text", + "bbox": [ + 0.093, + 0.507, + 0.483, + 0.809 + ], + "angle": 0, + "content": "From the perspective of task distribution, visual tasks are inherently sparse compared to those in natural language processing because task-specific datasets [71, 85] for different tasks have minimal overlap [19, 32, 79]. Such sparse task learning isolates the knowledge of each task and limits the model from learning shared features across tasks. Moreover, the weak correlations between tasks hinder knowledge transfer and adaptability to new tasks. However, existing works in multi-task learning [10, 16, 31, 53] have verified the benefits of overlapping knowledge across related tasks. To alleviate the sparsity of visual tasks, we introduce a graph-structured dataset, Graph200K, where each image is associated with annotations spanning five metatasks, i.e., conditional generation [80], IP preservation [76], style transfer [81], image editing [69], and restoration [77]. By combining different conditions, we train the model with a variety of tasks that overlap with each other. Given this highly overlapping and compact task space, our dataset significantly increases task density, allowing the model to learn shared and transferable knowledge more effectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.483, + 0.902 + ], + "angle": 0, + "content": "For the architecture design, it is essential to 1) accommodate flexible task formats [27, 35, 71], ensuring seamless in-context learning, and 2) remain compatible with state-of-the-art models [33, 88] to fully leverage their strong generative priors. In this work, we find that the state-of-the-art image infilling model [33] has a consistent objective with our" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.355, + 0.907, + 0.46 + ], + "angle": 0, + "content": "in-context learning based universal generative formulation. 
Specifically, we concatenate all input and output images together, where the objective of a task is to fill the output area. This alignment enables us to build our model upon advanced general-purpose infilling models without additional modifications, achieving powerful universal generation capabilities with minimal data and training costs." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.461, + 0.909, + 0.642 + ], + "angle": 0, + "content": "In this work, we propose a universal image generation framework, VisualCloze, which fine-tunes FLUX.1-Filldev [33] with interrelated tasks sampled from Graph200K to learn transferable knowledge and support visual in-context learning. As the number of in-context examples increases, we observe enhanced performances and reduced task confusion, enabling the model to support a broad spectrum of in-domain tasks, including conditional generation, image restoration, editing, style transfer, IP-preservation, and their combinations. On unseen tasks, the model also shows a certain degree of generalization ability, as shown in Fig. 2. In summary, our main contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.643, + 0.905, + 0.687 + ], + "angle": 0, + "content": "- We propose an in-context learning based universal image generation framework that supports a wide range of indomain tasks and exhibits generalization to unseen ones." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.688, + 0.905, + 0.747 + ], + "angle": 0, + "content": "- We design a graph-structured dataset, Graph200K, which constructs a compact task space, enabling flexible online task sampling and promoting the models to learn shared and transferable knowledge across tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.748, + 0.905, + 0.809 + ], + "angle": 0, + "content": "- Our unified image generation formulation shares a consistent objective with the state-of-the-art infilling model, enabling exceptional performance through minimal tuning without modifying the structure." + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.643, + 0.905, + 0.809 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.823, + 0.655, + 0.838 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.848, + 0.691, + 0.864 + ], + "angle": 0, + "content": "2.1. Image Generation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.871, + 0.907, + 0.902 + ], + "angle": 0, + "content": "Recent advances in text-to-image generation have achieved remarkable performance, largely driven by the development" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.425 + ], + "angle": 0, + "content": "of autoregressive models [41, 58, 78] and diffusion models [2, 13, 15, 18, 24, 40, 42, 48, 51]. Among these, rectified flow transformers [15, 17, 33, 88] have shown great training efficiency and overall performance. Building on these foundational models, diverse applications have emerged, such as conditional generation [80], style transfer [64], and personalized generation [38]. More recently, universal models that address various tasks [35, 44, 83] have been explored. For example, unified models like OmniGen [71] leverage large vision language models to consolidate multiple tasks into a single framework. Similarly, UniReal [9] unifies image generation tasks as discontinuous video generation. 
However, they still face issues such as over-reliance on language instructions, isolation and sparsity of visual tasks, and architecture design accommodating flexible task formats. To address these issues, we propose a universal image generation framework that unifies generation tasks as image infilling. Through visual in-context learning and our Graph200K dataset that constructs a denser task space to learn transferable knowledge, our method alleviates ambiguity to support a diverse set of in-domain tasks and generalizes to tasks unseen during training." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.462, + 0.336, + 0.479 + ], + "angle": 0, + "content": "2.2. Visual In-context Learning" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.494, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Along with the emergence of large language models, such as GPT-3 [5], in-context learning [14] has been an effective approach to allow the language model to understand and perform complex tasks given a few demonstrations. Early works [21, 22] in vision modality propose image analogies to create an image filter from examples automatically. In recent years, leveraging inpainting model [3, 4, 82], masked image modeling [43, 66, 67], or vision-language model [1, 86], visual in-context learning is proposed to handle more tasks. However, they mainly focus on dense prediction [55, 59, 87] or visual understanding [63]. OmniGen [71] also leverages in-context learning to generalize to unseen domains, e.g., segmenting unseen concepts when the model has learned the segmentation task during training. However, it mainly focuses on simple tasks of dense prediction, and the gap between the unseen and training domains is still limited. Some recent works [34, 43, 60, 68] extend visual in-context learning to image generation, but they are still limited by simple tasks such as conditional generation and dense prediction. 
Moreover, the sparsity of visual tasks makes it difficult for models to learn transferable and overlapping knowledge across tasks, limiting the generation ability of in-context learning. In contrast, we introduce a graph-structured dataset that supports interrelated tasks and thus constructs a more dense task space, promoting the model to learn shared and transferable knowledge and enhance its adaptability." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.092, + 0.91, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.42, + 0.908, + 0.492 + ], + "angle": 0, + "content": "Figure 4. Illustration of the proposed Graph200K dataset. Each image is annotated for five meta-tasks, i.e., conditional generation, image restoration, image editing, IP preservation, and style transfer. Using these tasks, we can combine a wide range of complex tasks, such as the bottom of the figure." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.519, + 0.603, + 0.535 + ], + "angle": 0, + "content": "3. Dataset" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.546, + 0.907, + 0.803 + ], + "angle": 0, + "content": "Recent works [26, 44, 71] have made great progress in unified image generation. However, their generalization to unseen tasks remains highly limited. We partially attribute this issue to the sparsity and isolation of visual tasks, hindering the model from learning shared features across tasks and handling unseen ones. Moreover, weak correlations between tasks further hinder knowledge transfer, restricting the adaptability of models. Therefore, increasing task density or strengthening task inter-relations helps improve the generalization ability of models via a compact task distribution. In this paper, we take the Subject200K [61] dataset as a starting point and construct our Graph200K dataset by augmenting each image with 49 types of annotations spanning five meta-tasks. 
This enriched annotation space enables flexible construction of a wide range of related tasks by sampling and combining arbitrary subsets of annotations across different meta-tasks, as illustrated in Fig. 4." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.817, + 0.843, + 0.833 + ], + "angle": 0, + "content": "3.1. Graph-Structured Multi-Task Dataset" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.909, + 0.903 + ], + "angle": 0, + "content": "In natural language processing, tasks overlap significantly, facilitating strong cross-task learning ability. In contrast, visual tasks are inherently distinct, posing challenges for vision models to achieve similar generalization ability via" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.226 + ], + "angle": 0, + "content": "instruction tuning. To ease this issue, we introduce a Graph-Structured Multi-Task Dataset. As illustrated in Fig. 4 (a), given a text-to-image dataset, each image is treated as the central node of a graph, around which diverse task annotations are constructed, including those for various spatial conditions, degradations, image editing results, reference image for IP-preservation, and style transfer with various reference styles. The construction process for each task pair is detailed in the next section." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.23, + 0.487, + 0.471 + ], + "angle": 0, + "content": "As shown in Fig. 4, each task annotation forms a bidirectional edge with the image. Thus, the graph is strongly connected, which means that for any two nodes, bidirectional paths exist between them. In other words, a generation task can be formulated as a path within the graph. 
The nodes along a path (except the end node) serve as condition images, which is analogous to the question in instruction fine-tuning, while the target image (the end node) plays the role of the answer. Specifically, there are 49 types of nodes in our Graph200K, and we sample up to 134 highly overlapping tasks, making the model learn more compact and shared representations across tasks. Moreover, it enriches the diversity and flexibility of our instruction fine-tuning data. For example, the path reference \\(\\rightarrow\\) editing \\(\\rightarrow\\) image corresponds to the task of image editing with reference, as shown in Fig. 4 bottom." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.485, + 0.292, + 0.5 + ], + "angle": 0, + "content": "3.2. Dataset Construction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.508, + 0.483, + 0.584 + ], + "angle": 0, + "content": "For convenience, we inherit subject-driven data from the Subjects200K [61]. Additionally, 32 different degradations are applied online to the images to acquire restoration data. We summarize the data construction methods in this section for the remaining three tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.599, + 0.483, + 0.795 + ], + "angle": 0, + "content": "Conditional generation. Each image is paired with 12 distinct conditions generated by specialized models, including canny edges [6], HED edges [72], Hough lines [20], semantic segmentation maps [37], depth maps [74], shape normal maps [73], and human keypoints [7], following ControlNet [80]. This work extends the conditions by incorporating SAM2 [50] masks, foreground segmentation, and open-world boxes and masks. The foreground segmentation, derived from the RMBG [84], supports diverse tasks such as inpainting and foreground extraction. Open-world bounding boxes are generated through the grounding caption capability of Qwen2-VL [65], which are processed using SAM2 [50] to produce corresponding masks." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.81, + 0.483, + 0.902 + ], + "angle": 0, + "content": "Style transfer. We transfer the style of images according to reference in both semantic-variant and semantic-invariant settings. Specifically, the semantic-invariant transfer adopts InstantStyle [64] to preserve the semantic content, while the semantic-variant transfer relies on FLUX.1-Redux-dev [33], using the style embeddings and depth as" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.907, + 0.138 + ], + "angle": 0, + "content": "conditions. For each image, we randomly generate five stylized versions. Mixing the two tasks pushes the model to follow the in-context examples better to avoid ambiguity." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.15, + 0.907, + 0.348 + ], + "angle": 0, + "content": "Image editing. We design two types of editing tasks, including background-variant and background-invariant editing. The background-invariant editing begins with localizing the subjects. Then, we leverage a large vision-language model, Qwen2-VL [65], to modify the image caption with a new object that replaces the original subject. The image, with the subject masked, is subsequently processed by the FLUX.1-Fill-dev [33] inpainting model to integrate the alternative object into the masked region. The above operation is repeated five times to enrich the dataset. For background-variant editing, the difference lies in the last step, which utilizes FLUX.1-Redux-dev [33] with depth as the condition and the modified caption as the text prompt." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.356, + 0.64, + 0.372 + ], + "angle": 0, + "content": "3.3. 
Other Data" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.378, + 0.909, + 0.56 + ], + "angle": 0, + "content": "To further expand the range of tasks and enhance the generalization ability of models, we incorporate several open-source datasets during training, including VITON-HD [11] for virtual try-on and PhotoDoodle [28] for artistic image editing. For image editing tasks, we also extend the dataset with OmniEdit [69]. Specifically, two sub-tasks, i.e., object addition and removal, are used for training. The other editing tasks, such as attribute modification and environment change, are treated as unseen tasks to assess the generalization ability of the trained model. Furthermore, we leverage a portion of high-quality internal data, covering tasks of the drawing process [62] and multi-view generation [29]." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.573, + 0.607, + 0.589 + ], + "angle": 0, + "content": "4. Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.598, + 0.909, + 0.765 + ], + "angle": 0, + "content": "This paper identifies the core challenges in building a universal image generation model, including the need for a clearly defined and generalizable task formulation, visual task sparsity, and the lack of a unified framework for multi-task learning. In the previous section, we addressed the issue of task sparsity by constructing the compact Graph200K dataset. Sec. 4.1 introduces visual in-context learning as the ideal paradigm for universal task formulation. Afterward, Sec. 4.2 considers the image infilling model a unified multi-task framework, achieving strong generalization capabilities with minimal cost." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.774, + 0.759, + 0.79 + ], + "angle": 0, + "content": "4.1. 
Visual In-context Learning" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Language instructions are usually used to specify the generation definition to handle multiple visual generation tasks with a single generative model. However, due to the gap between vision and language, the text comprehension ability of image generation models remains limited. This issue leads to task confusion [39] in existing universal generative models and weak generalization to unseen tasks. Inspired" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.103, + 0.098, + 0.485, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.272, + 0.483, + 0.33 + ], + "angle": 0, + "content": "Figure 5. Concatenating images when applying position embeddings. The \\( L \\) images within \\( C \\) in-context examples and the query are first concatenated horizontally. Then, these concatenated rows are concatenated temporally to handle mismatched aspect ratios." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.355, + 0.483, + 0.416 + ], + "angle": 0, + "content": "by the success of few-shot learning on large language models [5], we recognize that visual context may serve as a more friendly task instruction for visual generative models, given their superior visual understanding capabilities." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.416, + 0.484, + 0.703 + ], + "angle": 0, + "content": "Therefore, in this paper, we re-propose visual in-context learning to build a universal and generalizable image generation system. 
For the sake of description, here we assume the image input-output of arbitrary conditional generation task as a query consisting of \\( L - 1 \\) condition images and a blank target \\( \\varnothing \\) to be completed by the model, i.e., \\( X = \\mathrm{concat}(\\{x_1,\\dots ,x_{L - 1},\\emptyset \\}) \\). In Sec. 5.1, we demonstrate that our method can be extended to more general scenarios, where it can generate images at arbitrary positions and in any quantity rather than just the single image at the end of the query. During training, we randomly provide up to \\( C \\) in-context examples, each containing \\( L \\) images as the query. This strategy ensures the generalization ability of models across different numbers of in-context examples. In our experiments, we show that providing in-context examples as task demonstrations not only helps alleviate task confusion and boost model performance across in-domain tasks [39], but also enhances the generalization ability on unseen tasks." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.713, + 0.365, + 0.728 + ], + "angle": 0, + "content": "4.2. Unified Multi-task Framework" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.735, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Unlike previous visual in-context learning methods that primarily focus on scenarios with a single image condition and a single context [43, 60], in this work, we aim to construct a unified framework capable of handling varying numbers of conditions and contexts, allowing for flexible adaptation to diverse tasks. For ease of description, we first assume all images processed by the model share the same size, \\( W \\times H \\), and we extend to the scenario with mismatched aspect ratios at the end of this section. 
In this way, given \\( C \\) in-context examples and the query, each containing \\( L \\) images, all images can be concatenated into a complete grid-layout image"
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.092,
      0.905,
      0.182
    ],
    "angle": 0,
    "content": "with a size of \\((L\\times W,(C + 1)\\times H)\\). Then, the model can complete a task by infilling the target grids based on the surrounding context, akin to solving visual cloze puzzles. Therefore, we build our unified framework, VisualCloze, based on the general image infilling architecture capable of handling multiple resolutions."
  },
  {
    "type": "text",
    "bbox": [
      0.513,
      0.182,
      0.905,
      0.213
    ],
    "angle": 0,
    "content": "Consistent with common diffusion-based infilling model designs, our model can be formulated as follows:"
  },
  {
    "type": "equation",
    "bbox": [
      0.643,
      0.219,
      0.905,
      0.238
    ],
    "angle": 0,
    "content": "\\[\n\\hat{X} = f(X \\mid T, M), \\tag{1}\n\\]"
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.244,
      0.905,
      0.306
    ],
    "angle": 0,
    "content": "where \\( X \\) is the concatenated image, with the last grid left blank, \\( T \\) is the language instruction, \\( M \\) is the mask condition, and \\( \\hat{X} \\) represents the infilled result. The mask \\( M \\) is a binary matrix with the size of \\( (H \\times (C + 1), W \\times L) \\):"
  },
  {
    "type": "equation",
    "bbox": [
      0.532,
      0.312,
      0.905,
      0.368
    ],
    "angle": 0,
    "content": "\\[\nM(i, j) = \\left\\{ \\begin{array}{ll} 1 & \\text{if } i \\in [H \\times (C - 1), H \\times C) \\\\ & \\text{and } j \\in [W \\times (L - 1), W \\times L), \\\\ 0 & \\text{otherwise}, \\end{array} \\right. \\tag{2}\n\\]"
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.375,
      0.907,
      0.482
    ],
    "angle": 0,
    "content": "where \\( M(i,j) = 1 \\) indicates that the pixel will be masked and generated by the infilling model. Equ. 
(2) masks the region in the last row and column, i.e., the target image. During training, we also randomly mask one of the first \\( L - 1 \\) grids with a probability of 0.5, promoting reverse generation shown in Sec. 5.1. For the inference stage, we can crop \\( \\hat{X} \\) to obtain the target image easily."
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.495,
      0.907,
      0.678
    ],
    "angle": 0,
    "content": "Aligned optimization objective. A key benefit of this design is that our VisualCloze formulation shares a highly consistent objective with general image infilling models without architectural modifications or explicit input conditions. This consistency allows us to directly fine-tune advanced image infilling models using the newly constructed dataset while maximizing the utilization of the prior knowledge of foundation models. In contrast, existing task-specific models often require introducing additional learnable modules [38, 69] or adapting to extra condition inputs [61], which may compromise the native capabilities of the model."
  },
  {
    "type": "text",
    "bbox": [
      0.512,
      0.69,
      0.907,
      0.903
    ],
    "angle": 0,
    "content": "Language instructions. Note that the design of language instruction is also necessary for VisualCloze because it is responsible for defining the grid image layout, describing the caption of the image to be generated, and specifying the task intent when in-context examples are unavailable. In our unified framework, the instruction consists of three parts: (1) layout instruction, which describes the \\((C + 1)\\times L\\) layout of the grid image; (2) task instruction, which specifies the task type; and (3) content instruction, which describes the content of the target image. The details about the instructions are available in Appendix A. By restructuring the three components \\(X\\), \\(T\\), and \\(M\\) in Equ. 
(1), we achieve a unified multi-task framework for image generation with the general image infilling paradigm and support in-context learning." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.244 + ], + "angle": 0, + "content": "Positional embedding. In the preceding section, all images are concatenated into a grid layout image and we can apply positional embedding (i.e., RoPE [57]) on this large image. However, a potential limitation lies in composing a grid image from in-context examples with varying aspect ratios. To overcome this issue, we leverage the 3D-RoPE in Flux.1-Fill-dev to concatenate the query and in-context examples along the temporal dimension, as shown in Fig. 5, effectively overcoming this issue without introducing any noticeable performance degradation." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.252, + 0.31, + 0.269 + ], + "angle": 0, + "content": "4.3. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.274, + 0.485, + 0.609 + ], + "angle": 0, + "content": "We use FLUX.1-Fill-dev [33] as our foundation model, considering its outstanding performance among open-source image infilling models. In this work, LoRA [25] is chosen to fine-tune the model instead of fully fine-tuning it to reduce training costs and preserve the capabilities of the foundation model. The resulting LoRA can also be fused with other LoRAs in the community, enabling more widespread applications. Specifically, we set the rank of LoRA as 256. The model is tuned for 20,000 iterations with an accumulated batch size of 64 on \\(8 \\times \\mathrm{A}100\\) GPUs. We employ the AdamW optimizer with a learning rate of \\(1e^{-4}\\). Following FLUX.1-Fill-dev, we incorporate the lognorm noise strategy with dynamic time shifting. 
During training, the number of in-context examples is set up to 2 (i.e., \\(C\\) as defined in Sec. 4.2), while \\(L\\), the number of images involved in a task, varies between 2 and 4 in the Graph200K dataset. During inference, the number of in-context examples can be generalized to a larger number. To balance computational efficiency, each image is resized to the area of \\(384 \\times 384\\) or \\(512 \\times 512\\) before concatenating them into a grid layout. High-resolution outputs can be obtained in practical applications through simple post-up-scaling techniques [45]." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.621, + 0.224, + 0.637 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.645, + 0.462, + 0.663 + ], + "angle": 0, + "content": "5.1. Qualitative Analysis of In-context Learning" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.667, + 0.484, + 0.743 + ], + "angle": 0, + "content": "This section presents a series of experiments demonstrating the effectiveness of in-context learning across different tasks, especially those unseen during training. Based on our extensive experiments, we summarize four key findings that highlight the role of in-context learning." + }, + { + "type": "title", + "bbox": [ + 0.118, + 0.754, + 0.329, + 0.77 + ], + "angle": 0, + "content": "In-Context Learning Findings 1" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.781, + 0.458, + 0.81 + ], + "angle": 0, + "content": "In-context learning can mitigate task confusion for seen tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Task ambiguity on seen tasks. The model occasionally experiences task confusion, failing to interpret the intended objective accurately, especially on dense prediction tasks. 
In-context learning effectively alleviates this issue" + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.087, + 0.906, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.679, + 0.147, + 0.796, + 0.161 + ], + "angle": 0, + "content": "(a) Image to Pose" + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.162, + 0.905, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.676, + 0.22, + 0.802, + 0.234 + ], + "angle": 0, + "content": "(b) Image to Depth" + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.234, + 0.905, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.679, + 0.292, + 0.798, + 0.305 + ], + "angle": 0, + "content": "(c) Image to Edge" + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.305, + 0.904, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.667, + 0.363, + 0.804, + 0.377 + ], + "angle": 0, + "content": "(d) Normal to Image" + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.395, + 0.907, + 0.424 + ], + "angle": 0, + "content": "Figure 6. In-context learning mitigates the task ambiguity in seen tasks. We show three results using different initial noises." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.451, + 0.907, + 0.648 + ], + "angle": 0, + "content": "by providing task-specific demonstrations. For example, in Fig. 6 (a) and (c), the model may produce noisy results without in-context examples in pose estimation and edge detection, while increasing the number of in-context examples enhances the performance and stability. In depth estimation shown in Fig. 6 (b), in-context examples also improve the accuracy when the model originally makes inaccurate estimates, especially in distant areas. 
Additionally, in some tasks like conditional generation, we note that the model can generate satisfactory results stably even without in-context examples, as shown in Fig. 6 (d). However, the quantitative comparison in Tab. 1 still shows that using in-context learning can further improve the accuracy of task completion." + }, + { + "type": "title", + "bbox": [ + 0.541, + 0.66, + 0.752, + 0.675 + ], + "angle": 0, + "content": "In-Context Learning Findings 2" + }, + { + "type": "text", + "bbox": [ + 0.539, + 0.687, + 0.88, + 0.733 + ], + "angle": 0, + "content": "In-context learning supports generalization to unseen tasks, where providing more in-context examples could lead to more accurate generation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.765, + 0.907, + 0.903 + ], + "angle": 0, + "content": "Generalization on unseen tasks. Beyond mitigating task confusion, in-context learning also enables the model to generalize to tasks unseen during training. Fig. 2 has shown the model can successfully generate frontal faces from side-view images and transfer editing instructions [8] through in-context learning, even though they are not encountered during training. Here, we present additional examples of unseen tasks. For instance, although the model is trained exclusively on image editing tasks involving object addi" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.101, + 0.085, + 0.291, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.303, + 0.085, + 0.491, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.097, + 0.277, + 0.493, + 0.331 + ], + "angle": 0, + "content": "Task Prompt: In each row, a logical task is demonstrated to achieve [IMAGE2] a high-aesthetic image based on [IMAGE1] an aesthetically pleasing photograph. 
Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: change the setting to a winter scene. <\\editing instruction>" + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.085, + 0.7, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.71, + 0.084, + 0.898, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.505, + 0.278, + 0.9, + 0.331 + ], + "angle": 0, + "content": "Task Prompt: In each row, a logical task is demonstrated to achieve [IMAGE2] a high-aesthetic image based on [IMAGE1] an aesthetically pleasing photograph. Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: turn the color of sunglasses to green. " + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.336, + 0.907, + 0.379 + ], + "angle": 0, + "content": "Figure 7. Unseen Tasks: Although the image editing tasks seen by the model are only about object addition and object removal, it can still generalize to other types of editing tasks, such as environment modification (Left) and attribute transformation (Right), through in-context learning. More unseen tasks are shown in Fig. 2." + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.388, + 0.281, + 0.567 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.388, + 0.478, + 0.567 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.575, + 0.483, + 0.631 + ], + "angle": 0, + "content": "Figure 8. Unseen Tasks: VisualCloze is capable of performing multi-subject driven generation [70], even though the model was only exposed to single subject-driven generation tasks during training. Best viewed by zooming in." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.669, + 0.483, + 0.79 + ], + "angle": 0, + "content": "tion and removal, it still generalizes to other types of editing tasks, such as environment changes and attribute modifications, as shown in Fig. 7. Furthermore, as demonstrated in Fig. 8, the model, trained solely on single-subject generation, can generate images preserving identities of multiple subjects. These results highlight that in-context learning is an effective guidance mechanism, enabling adaptation to novel tasks without retraining." + }, + { + "type": "title", + "bbox": [ + 0.118, + 0.813, + 0.329, + 0.829 + ], + "angle": 0, + "content": "In-Context Learning Findings 3" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.84, + 0.458, + 0.886 + ], + "angle": 0, + "content": "In-context learning enables task unification, an unseen strategy that consolidating sub-tasks into a single step and generating intermediate results." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.392, + 0.907, + 0.603 + ], + "angle": 0, + "content": "Multi-task consolidation. Meanwhile, we also find that through in-context learning, we can consolidate multiple tasks into a single execution step, which can be viewed as another form of unseen task. Fig. 3 has shown two examples, where we 1) merge conditional generation and relighting shown on the left and 2) perform depth estimation, surface normal estimation, and edge detection simultaneously shown on the right. Similarly, Fig. 11 illustrates how we can combine multiple conditions for conditional generation to achieve finer control. For instance, generating a portrait based on keypoints provides only rough information about the location and body pose. In such cases, contour conditions can be used to control the attributes of other visual elements." 
+ }, + { + "type": "title", + "bbox": [ + 0.54, + 0.63, + 0.753, + 0.646 + ], + "angle": 0, + "content": "In-Context Learning Findings 4" + }, + { + "type": "text", + "bbox": [ + 0.539, + 0.657, + 0.88, + 0.718 + ], + "angle": 0, + "content": "Different in-context learning examples lead to varying effects, where examples that can better convey mission intent can achieve better and more stable generation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.765, + 0.907, + 0.903 + ], + "angle": 0, + "content": "Varying effects of different in-context examples. Following prior works [46, 52] on the prompt selection, we also find that different in-context examples could impact the generation quality. Specifically, it is crucial that in-context examples provide correct and strong guidance about the task intention. For example, as shown in Fig. 10 (left), when the side faces are more towards the front than in Fig. 10 (right), the success rate of correctly generating frontal faces has dropped dramatically." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.098, + 0.121, + 0.232 + ], + "angle": 270, + "content": "Two In-Context Examples" + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.09, + 0.222, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.226, + 0.09, + 0.323, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.09, + 0.427, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.165, + 0.221, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.226, + 0.165, + 0.324, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.165, + 0.427, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.124, + 0.242, + 0.222, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.242, + 0.325, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.242, + 0.428, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.096, + 0.31, + 0.443, + 0.355 + ], + "angle": 0, + "content": "Task Prompt: In each row, a method uses[IMAGE1] gray-shaded depth map with distinct edges, [IMAGE2] Artistically rendered content for generating [IMAGE3] High-definition picture in a unique art style." 
+ }, + { + "type": "image", + "bbox": [ + 0.458, + 0.09, + 0.58, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.091, + 0.683, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.691, + 0.091, + 0.787, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.795, + 0.09, + 0.891, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.483, + 0.165, + 0.581, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.165, + 0.683, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.691, + 0.165, + 0.787, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.795, + 0.165, + 0.891, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.482, + 0.242, + 0.581, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.242, + 0.684, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.242, + 0.788, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.795, + 0.242, + 0.892, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.448, + 0.31, + 0.901, + 0.356 + ], + "angle": 0, + "content": "Task Prompt: Every row demonstrates how to transform [IMAGE1] an image with vivid details into [IMAGE2] gray-scale depth map with clear object boundaries, [IMAGE3] rgb normal map for bump mapping effects, [IMAGE4] soft-edged map from hed detection through a logical approach." + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.361, + 0.907, + 0.404 + ], + "angle": 0, + "content": "Figure 9. Unseen Tasks: Through in-context learning, we can perform reverse generation from targets to conditions. 
For example, (a) decomposing the layout and style from a stylized image and (b) inferring the image, depth, and surface normal simultaneously from an edge map, which is the reverse task of Fig. 3 (Left)." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.414, + 0.275, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.414, + 0.47, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.558, + 0.28, + 0.62 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.293, + 0.558, + 0.472, + 0.619 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.634, + 0.483, + 0.689 + ], + "angle": 0, + "content": "Figure 10. Illustration of the impact of different in-context examples on in-context learning. In the second example on the left, the left and right faces are too biased towards the front, so they do not show the core goal of the task intention." + }, + { + "type": "title", + "bbox": [ + 0.118, + 0.719, + 0.329, + 0.735 + ], + "angle": 0, + "content": "In-Context Learning Findings 5" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.746, + 0.458, + 0.792 + ], + "angle": 0, + "content": "In-context learning can guide bilateral generation, even for the reverse process that is unseen during training." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.826, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Bilateral generation. In addition to generating the target from a set of given conditions, our model also shows the capability of reverse generation, i.e., inferring the underlying conditions from the target. 
Although our model has randomly treated one condition image as the target when" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.414, + 0.643, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.415, + 0.766, + 0.503 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.777, + 0.415, + 0.892, + 0.503 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.506, + 0.644, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.651, + 0.506, + 0.768, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.775, + 0.506, + 0.894, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.521, + 0.6, + 0.897, + 0.647 + ], + "angle": 0, + "content": "Task Prompt: Every row demonstrates how to transform [IMAGE1] human pose with colored lines for bone structure and [IMAGE2] canny map with sharp white edges and dark into [IMAGE3] a visually striking and clear picture through a logical approach." + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.66, + 0.907, + 0.703 + ], + "angle": 0, + "content": "Figure 11. Unseen Tasks: Unseen combinations of multiple tasks. For conditional generation, we integrate multiple conditions achieve more precise control. More examples are shown in Fig. 3." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.907, + 0.901 + ], + "angle": 0, + "content": "training as described in Sec. 4.2, it can generalize to a more challenging and unseen setting during inference, i.e., inferring all conditional images from only the target image. For instance, as illustrated in Fig. 9 (left), the model can reverse-engineer both the original and the style reference images given a stylized image, demonstrating the ability to disentangle the content and style representations. Similarly, as shown in Fig. 
9 (right), the model can generate the corresponding real image, depth estimation, and surface normal estimation from an edge image, representing the inverse task of Fig. 3 (left). The ability to perform such" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.093, + 0.089, + 0.907, + 0.484 + ], + "angle": 0, + "content": "
ConditionMethodContextControllabilityQualityText Consistency
F1 ↑RMSE ↓FID [23] ↓SSIM ↑MAN-IQA [75] ↑MUSIQ [30] ↑CLIP-Score [49] ↑
CannyControlNet [80]0.13-46.060.340.3145.4534.10
OminiControl [61]0.47-29.580.610.4461.4034.40
OneDiffusion [35]0.39-32.760.550.4659.9934.99
OmniGen [71]0.43-51.580.470.4762.6633.66
Oursdev00.39-30.360.610.4861.1335.03
Oursfill00.35-30.600.550.4964.3934.98
Oursfill10.36-31.340.550.4964.1234.96
Oursfill20.36-31.150.560.4964.0834.85
DepthControlNet [80]-23.7036.830.410.4460.1734.49
OminiControl [61]-21.4436.230.520.4460.1834.08
OneDiffusion [35]-10.3539.030.490.4960.4934.71
OmniGen [71]-15.0786.080.260.4964.9029.72
Oursdev0-25.0642.140.530.4658.9534.80
Oursfill0-10.3133.880.540.4864.8535.10
Oursfill1-9.9134.440.540.4964.3234.95
Oursfill2-9.6834.880.540.4864.2934.89
DeblurControlNet [80]-37.8253.280.490.4561.9233.80
OminiControl [61]-19.7026.170.850.4560.7034.53
OneDiffusion [35]-------
OmniGen [71]-------
Oursdev0-25.0356.760.740.3846.6833.52
Oursfill0-26.5340.590.740.4659.6234.56
Oursfill1-25.8736.930.760.4861.5834.82
Oursfill2-25.5736.280.760.4861.7734.82
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.494, + 0.907, + 0.524 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison on conditioning generation and image restoration. The methods that train a specialist for each task are marked as gray color. Except for these methods, the best method is bolded, and the second best method is underlined." + }, + { + "type": "table", + "bbox": [ + 0.107, + 0.534, + 0.472, + 0.678 + ], + "angle": 0, + "content": "
MethodContextDINOv2CLIP-ICLIP-T
OminiControl [61]73.1787.7033.53
OneDiffusion [35]73.8886.9134.85
OmniGen [71]67.7383.4334.53
Oursdev078.0587.6835.06
Oursfill080.4189.6335.16
Oursfill179.3389.2235.02
Oursfill280.3289.3635.01
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.689, + 0.483, + 0.745 + ], + "angle": 0, + "content": "Table 2. Quantitative comparison for subject-driven image generation. We report clip scores on text alignment and style consistency. Specialists are shaded in gray. Among the remaining methods, the best is emphasized in bold, while the second best is underlined." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.773, + 0.483, + 0.82 + ], + "angle": 0, + "content": "reverse tasks highlights the flexibility and robustness in understanding complex relationships between different types of image representations." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.832, + 0.231, + 0.848 + ], + "angle": 0, + "content": "5.2. Main Results" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.484, + 0.903 + ], + "angle": 0, + "content": "We compare our method with universal generative models, including OmniGen [71] and OneDiffusion [35], as well as specialized models, such as ControlNet [80] and Omni-" + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.534, + 0.902, + 0.627 + ], + "angle": 0, + "content": "
text↑image↑
InstantStyle [64]0.270.60
OmniGen [71]0.270.52
Oursdev0.300.53
Oursfill0.290.55
" + }, + { + "type": "table_caption", + "bbox": [ + 0.512, + 0.637, + 0.907, + 0.695 + ], + "angle": 0, + "content": "Table 3. Quantitative comparison for style transfer. We report CLIP scores on text alignment and style consistency. The specialists are indicated in gray. Among the others, the top-performing one is highlighted in bold, and the second best is underlined." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.715, + 0.907, + 0.793 + ], + "angle": 0, + "content": "Control [61]. The details of the evaluation metrics are provided in Appendix C. Additionally, we fine-tune FLUX.1-dev [33] using the same settings as FLUX.1-Fill-dev for comparison and refer to the tuned models as Oursdev and Oursfill. The details of Oursdev are shown in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.909, + 0.903 + ], + "angle": 0, + "content": "For conditional generation and image restoration, we evaluate the models based on three criteria, i.e., controllability, visual quality, and text consistency, following the evaluation approach of OminiControl [61]. As shown in Tab. 1, our framework demonstrates comparable controllability to existing universal methods while achieving superior visual quality and text consistency. Compared to spe" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.51, + 0.938 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.085, + 0.482, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.288, + 0.483, + 0.318 + ], + "angle": 0, + "content": "Figure 12. Comparison between Flux.1-dev (Oursdev) and Flux.1-Fill-dev (Oursfill)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.343, + 0.483, + 0.373 + ], + "angle": 0, + "content": "cialized methods, our model performs on par with the best results and even outperforms them on the depth-to-image." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.374, + 0.483, + 0.479 + ], + "angle": 0, + "content": "In the style transfer task, we measure text consistency and style alignment using the CLIP [49] model. As reported in Tab. 3, our method outperforms OmniGen [71] by \\(2\\%\\) and \\(3\\%\\) in text alignment and style consistency, respectively. Even when compared with InstantStyle-Plus [81], a specialized model, we achieve a \\(2\\%\\) improvement in text consistency, with only a slight decrease in style alignment." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.48, + 0.483, + 0.585 + ], + "angle": 0, + "content": "Furthermore, we evaluate the models on subject-driven image generation and report semantic alignment using the DINOv2 [47], CLIP-I [49], and CLIP-T [49] scores. Across all these metrics, our method consistently delivers improvements, as shown in Tab. 2. For example, compared to the specialized model OminiControl [61], we achieve improvements of \\(7.15\\%\\), \\(1.66\\%\\), and \\(1.48\\%\\) in these three scores." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.599, + 0.483, + 0.734 + ], + "angle": 0, + "content": "Advantages of the infilling model. Our method (Oursfill) is built on FLUX.1-Fill-dev [33], which shares the same objective as our unified image generation framework. To verify its effectiveness, we also fine-tune Fill.1-dev [33] (Oursdev) using identical settings. Unlike Oursfill, which requires no modifications, Oursdev necessitates model adaptations for universal image generation, as shown in Appendix B. Despite its simplicity, Oursfill achieves superior performance across multiple tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.735, + 0.483, + 0.901 + ], + "angle": 0, + "content": "As shown in Tab. 1, \\( \\text{Ours}_{\\text{dev}} \\) achieves a higher F1 score than \\( \\text{Ours}_{\\text{fill}} \\) in the canny-to-image generation. 
However, in other tasks, \\( \\text{Ours}_{\\text{fill}} \\) demonstrates a significant advantage. For instance, in the depth-to-image generation, \\( \\text{Ours}_{\\text{fill}} \\) reduces RMSE from 25.06 to 10.31. In the deblurring task, \\( \\text{Ours}_{\\text{fill}} \\) achieves superior quality by lowering RMSE while maintaining a higher SSIM. In subject-driven image generation, Tab. 2 shows that \\( \\text{Ours}_{\\text{fill}} \\) consistently outperforms \\( \\text{Ours}_{\\text{dev}} \\). Additionally, in semantic-invariant style transfer, \\( \\text{Ours}_{\\text{fill}} \\) delivers comparable performance to \\( \\text{Ours}_{\\text{dev}} \\), as shown in Tab. 3." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.198 + ], + "angle": 0, + "content": "Fig. 12 presents a visual comparison, where Oursfill demonstrates clear advantages over Oursdev. Notably, in the depth-to-image generation, images produced by Oursdev frequently exhibit diagonal streak artifacts, which significantly degrade visual fidelity. Considering the advantages in performance, visual quality, and architectural efficiency, Oursfill stands out as the superior model." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.211, + 0.907, + 0.422 + ], + "angle": 0, + "content": "Quantitative comparison on in-context learning. Here, we further analyze the impact of in-context learning on seen tasks. Tab. 1 demonstrates the impact of in-context learning on different image generation tasks. Under the canny condition, our method without in-context examples achieves an FID of 30.60, which improves to 31.15 with two in-context examples. When conditioned on depth, the RMSE decreases from 10.31 to 9.68 as the number of in-context examples increases, indicating enhanced structural consistency. Similarly, in the deblurring task, RMSE decreases from 26.53 to 25.57, reflecting improved fidelity to the original content. 
These results highlight in-context learning as an effective guidance mechanism, enabling the model to better align with the task intent." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.437, + 0.637, + 0.451 + ], + "angle": 0, + "content": "6. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.462, + 0.906, + 0.583 + ], + "angle": 0, + "content": "While our model demonstrates strong stability across most in-domain tasks, it still exhibits some instability in specific tasks, such as object removal. This limitation suggests that the performance is sensitive to certain task characteristics. Additionally, the stability of the model on unseen tasks is still insufficient. Apart from the difficulty of the task and the difference with seen tasks, ambiguous in-context examples may also lead to less stable results, as discussed in Sec. 5.1." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.604, + 0.634, + 0.62 + ], + "angle": 0, + "content": "7. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.907, + 0.901 + ], + "angle": 0, + "content": "In this work, we propose VisualCloze, a universal image generation framework that addresses key challenges in existing methods, including generalizable instruction design, appropriate task distributions, and unified architectural design. Rather than relying solely on language-based instructions to convey task intent, we re-propose visual in-context learning, enabling the model to learn tasks from a few demonstrations. This approach improves generalization to unseen tasks and reduces task ambiguity. To overcome the sparsity of visual task distributions, which limits the learning of transferable knowledge, we construct Graph200K, a graph-structured dataset that establishes interrelated tasks. In this compact task space, the model is promoted to learn transferable representations and improve adaptability. 
Meanwhile, we identify the consistent objective between image infilling and our universal generation formulation, allowing us to seamlessly adapt general-purpose infilling models for universal generation without" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.486, + 0.152 + ], + "angle": 0, + "content": "architectural modifications. Experimental results show that our approach supports a diverse set of in-domain tasks using in-context learning while demonstrating strong generalization to unseen tasks." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.166, + 0.188, + 0.182 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.192, + 0.483, + 0.329 + ], + "angle": 0, + "content": "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andrew Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning. In NeurIPS, 2022. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.331, + 0.483, + 0.371 + ], + "angle": 0, + "content": "[2] Michael Samuel Albergo and Eric Vanden-Eijnden. Building normalizing flows with stochastic interpolants. In ICLR, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.375, + 0.482, + 0.415 + ], + "angle": 0, + "content": "[3] Ivana Balazevic, David Steiner, Nikhil Parthasarathy, Relja Arandjelovic, and Olivier J Henaff. Towards in-context scene understanding. In NeurIPS, 2023. 
3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.418, + 0.482, + 0.459 + ], + "angle": 0, + "content": "[4] Amir Bar, Yossi Gandelsman, Trevor Darrell, Amir Globerson, and Alexei A Efros. Visual prompting via image inpainting. In NeurIPS, 2022. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.461, + 0.482, + 0.515 + ], + "angle": 0, + "content": "[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. *NeurIPS*, 2020. 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.518, + 0.482, + 0.543 + ], + "angle": 0, + "content": "[6] John Canny. A computational approach to edge detection. IEEE TPAMI, 1986. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.546, + 0.482, + 0.587 + ], + "angle": 0, + "content": "[7] Z. Cao, G. Hidalgo Martinez, T. Simon, S. Wei, and Y. A. Sheikh. Openpose: Realtime multi-person 2d pose estimation using part affinity fields. IEEE TPAMI, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.59, + 0.482, + 0.63 + ], + "angle": 0, + "content": "[8] Lan Chen, Qi Mao, Yuchao Gu, and Mike Zheng Shou. Edit transfer: Learning image editing via vision in-context relations. arXiv preprint arXiv:2503.13327, 2025. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.633, + 0.482, + 0.713 + ], + "angle": 0, + "content": "[9] Xi Chen, Zhifei Zhang, He Zhang, Yuqian Zhou, Soo Ye Kim, Qing Liu, Yijun Li, Jianming Zhang, Nanxuan Zhao, Yilin Wang, Hui Ding, Zhe Lin, and Hengshuang. Unireal: Universal image generation and editing via learning real-world dynamics. arXiv preprint arXiv:2412.07774, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.717, + 0.483, + 0.77 + ], + "angle": 0, + "content": "[10] Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. 
GradNorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In ICML, 2018. 3"
2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.262, + 0.905, + 0.302 + ], + "angle": 0, + "content": "[16] Christopher Fifty, Ehsan Amid, Zhe Zhao, Tianhe Yu, Rohan Anil, and Chelsea Finn. Efficiently identifying task groupings for multi-task learning. In NeurIPS, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.306, + 0.905, + 0.385 + ], + "angle": 0, + "content": "[17] Peng Gao, Le Zhuo, Dongyang Liu, Ruoyi Du, Xu Luo, Longtian Qiu, Yuhang Zhang, Chen Lin, Rongjie Huang, Shijie Geng, et al. Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers. arXiv preprint arXiv:2405.05945, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.39, + 0.905, + 0.442 + ], + "angle": 0, + "content": "[18] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Mdtv2: Masked diffusion transformer is a strong image synthesizer. arXiv preprint arXiv:2303.14389, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.447, + 0.904, + 0.487 + ], + "angle": 0, + "content": "[19] Golnaz Ghiasi, Barret Zoph, Ekin D. Cubuk, Quoc V. Le, and Tsung-Yi Lin. Multi-task self-training for learning general representations. In ICCV, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.49, + 0.905, + 0.53 + ], + "angle": 0, + "content": "[20] Geonmo Gu, Byungsoo Ko, SeoungHyun Go, Sung-Hyun Lee, Jingeun Lee, and Minchul Shin. Towards light-weight and real-time line segment detection. In AAAI, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.533, + 0.905, + 0.572 + ], + "angle": 0, + "content": "[21] Aaron Hertzmann. Algorithms for rendering in artistic styles. PhD thesis, New York University, Graduate School of Arts and Science, 2001. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.576, + 0.905, + 0.63 + ], + "angle": 0, + "content": "[22] Aaron Hertzmann, Charles E. Jacobs, Nuria Oliver, Brian Curless, and David H. Salesin. Image analogies. 
In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, 2001. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.633, + 0.905, + 0.7 + ], + "angle": 0, + "content": "[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 10, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.704, + 0.905, + 0.73 + ], + "angle": 0, + "content": "[24] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In NeurIPS, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.733, + 0.905, + 0.785 + ], + "angle": 0, + "content": "[25] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In ICLR, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.789, + 0.905, + 0.843 + ], + "angle": 0, + "content": "[26] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Huanzhang Dou, Yupeng Shi, Yutong Feng, Chen Liang, Yu Liu, and Jingren Zhou. Group diffusion transformers are unsupervised multitask learners. arXiv preprint arxiv:2410.15027, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.905, + 0.9 + ], + "angle": 0, + "content": "[27] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Huanzhang Dou, Chen Liang, Yutong Feng, Yu Liu, and Jingren Zhou. In-context lora for diffusion transformers. arXiv preprint arxiv:2410.23775, 2024. 
2, 3" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.147 + ], + "angle": 0, + "content": "[28] Shijie Huang, Yiren Song, Yuxuan Zhang, Hailong Guo, Xueyin Wang, Mike Zheng Shou, and Jiaming Liu. Photodoodle: Learning artistic image editing from few-shot pairwise data. arXiv preprint arXiv:2502.14397, 2025. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.482, + 0.204 + ], + "angle": 0, + "content": "[29] Zehuan Huang, Yuanchen Guo, Haoran Wang, Ran Yi, Lizhuang Ma, Yan-Pei Cao, and Lu Sheng. Mv-adapter: Multi-view consistent image generation made easy. arXiv preprint arXiv:2412.03632, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.205, + 0.482, + 0.259 + ], + "angle": 0, + "content": "[30] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5148-5157, 2021. 10, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.261, + 0.482, + 0.3 + ], + "angle": 0, + "content": "[31] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In CVPR, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.302, + 0.482, + 0.355 + ], + "angle": 0, + "content": "[32] Iasonas Kokkinos. Ethernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In CVPR, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.357, + 0.482, + 0.397 + ], + "angle": 0, + "content": "[33] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024. 
2, 3, 4, 5, 7, 10, 11, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.4, + 0.482, + 0.468 + ], + "angle": 0, + "content": "[34] Bolin Lai, Felix Juefei-Xu, Miao Liu, Xiaoliang Dai, Nikhil Mehta, Chenguang Zhu, Zeyi Huang, James M Rehg, Sang-min Lee, Ning Zhang, et al. Unleashing in-context learning of autoregressive models for few-shot image manipulation. arXiv preprint arXiv:2412.01027, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.47, + 0.482, + 0.523 + ], + "angle": 0, + "content": "[35] Duong H. Le, Tuan Pham, Sangho Lee, Christopher Clark, Aniruddha Kembhavi, Stephan Mandt, Ranjay Krishna, and Jiasen Lu. One diffusion to generate them all, 2024. 2, 3, 4, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.525, + 0.482, + 0.566 + ], + "angle": 0, + "content": "[36] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pretrained subject representation for controllable text-to-image generation and editing. In NeurIPS, 2023. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.568, + 0.482, + 0.621 + ], + "angle": 0, + "content": "[37] Kunchang Li, Yali Wang, Junhao Zhang, Peng Gao, Guanglu Song, Yu Liu, Hongsheng Li, and Yu Qiao. Uniformer: Unifying convolution and self-attention for visual recognition. IEEE TPAMI, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.623, + 0.482, + 0.677 + ], + "angle": 0, + "content": "[38] Zhen Li, Mingdeng Cao, Xintao Wang, Zhongang Qi, MingMing Cheng, and Ying Shan. Photomaker: Customizing realistic human photos via stacked id embedding. In CVPR, 2024. 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.679, + 0.482, + 0.747 + ], + "angle": 0, + "content": "[39] Weifeng Lin, Xinyu Wei, Renrui Zhang, Le Zhuo, Shitian Zhao, Siyuan Huang, Junlin Xie, Yu Qiao, Peng Gao, and Hongsheng Li. Pixwizard: Versatile image-to-image visual assistant with open-language instructions. arXiv preprint arXiv:2409.15278, 2024. 
2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.749, + 0.482, + 0.789 + ], + "angle": 0, + "content": "[40] Yaron Lipman, Ricky T. Q. Chen, Heli Ben-Hamu, Maximilian Nickel, and Matthew Le. Flow matching for generative modeling. In ICLR, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.79, + 0.482, + 0.858 + ], + "angle": 0, + "content": "[41] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining. arXiv preprint arXiv:2408.02657, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.86, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[42] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 4" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.092, + 0.905, + 0.147 + ], + "angle": 0, + "content": "[43] Yihao Liu, Xiangyu Chen, Xianzheng Ma, Xintao Wang, Jiantao Zhou, Yu Qiao, and Chao Dong. Unifying image processing as visual prompting question answering. arXiv preprint arXiv:2310.10513, 2023. 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.149, + 0.905, + 0.205 + ], + "angle": 0, + "content": "[44] Chaojie Mao, Jingfeng Zhang, Yulin Pan, Zeyinzi Jiang, Zhen Han, Yu Liu, and Jingren Zhou. Ace++: Instruction-based image creation and editing via context-aware content filling. arXiv preprint arXiv:2501.02487, 2025. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.207, + 0.905, + 0.261 + ], + "angle": 0, + "content": "[45] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. Sdedit: Guided image synthesis and editing with stochastic differential equations. 
arXiv preprint arXiv:2108.01073, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.264, + 0.905, + 0.319 + ], + "angle": 0, + "content": "[46] Noor Nashid, Mifta Sintaha, and Ali Mesbah. Retrieval-based prompt selection for code-related few-shot learning. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE), pages 2450-2462. IEEE, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.321, + 0.905, + 0.389 + ], + "angle": 0, + "content": "[47] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 11, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.391, + 0.905, + 0.418 + ], + "angle": 0, + "content": "[48] William Peebles and Saining Xie. Scalable diffusion models with transformers. In ICCV, 2023. 4, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.42, + 0.905, + 0.502 + ], + "angle": 0, + "content": "[49] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 10, 11, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.504, + 0.905, + 0.601 + ], + "angle": 0, + "content": "[50] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, ChaoYuan Wu, Ross Girshick, Piotr Dólar, and Christoph Feichtenhofer. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.603, + 0.905, + 0.672 + ], + "angle": 0, + "content": "[51] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.674, + 0.905, + 0.714 + ], + "angle": 0, + "content": "[52] Ohad Rubin, Jonathan Herzig, and Jonathan Berant. Learning to retrieve prompts for in-context learning. arXiv preprint arXiv:2112.08633, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.717, + 0.905, + 0.744 + ], + "angle": 0, + "content": "[53] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.746, + 0.905, + 0.8 + ], + "angle": 0, + "content": "[54] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023. 2, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.802, + 0.905, + 0.858 + ], + "angle": 0, + "content": "[55] Dianmo Sheng, Dongdong Chen, Zhentao Tan, Qiankun Liu, Qi Chu, Jianmin Bao, Tao Gong, Bin Liu, Shengwei Xu, and Nenghai Yu. Towards more unified in-context visual understanding. In CVPR, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.86, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[56] Kihyuk Sohn, Nataniel Ruiz, Kimin Lee, Daniel Castro Chin, Irina Blok, Huiwen Chang, Jarred Barber, Lu Jiang, Glenn Entis, Yuanzhen Li, et al. 
StyleDrop: Text-to-image"
4, 5, 6, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.391, + 0.442, + 0.405 + ], + "angle": 0, + "content": "[62] Paints-Undo Team. Paints-undo github page, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.407, + 0.483, + 0.46 + ], + "angle": 0, + "content": "[63] Alex Jinpeng Wang, Linjie Li, Yiqi Lin, Min Li, Lijuan Wang, and Mike Zheng Shou. Leveraging visual tokens for extended text contexts in multi-modal learning. NeurIPS, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.462, + 0.483, + 0.518 + ], + "angle": 0, + "content": "[64] Haofan Wang, Peng Xing, Renyuan Huang, Hao Ai, Qixun Wang, and Xu Bai. Instantstyle-plus: Style transfer with content-preserving in text-to-image generation. arXiv preprint arXiv:2407.00788, 2024. 2, 4, 5, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.519, + 0.483, + 0.616 + ], + "angle": 0, + "content": "[65] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.617, + 0.483, + 0.659 + ], + "angle": 0, + "content": "[66] Xinlong Wang, Wen Wang, Yue Cao, Chunhua Shen, and Tiejun Huang. Images speak in images: A generalist painter for in-context visual learning. In CVPR, 2023. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.66, + 0.483, + 0.702 + ], + "angle": 0, + "content": "[67] Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, and Tiejun Huang. Seggpt: Towards segmenting everything in context. In ICCV, 2023. 
3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.703, + 0.483, + 0.758 + ], + "angle": 0, + "content": "[68] Zhendong Wang, Yifan Jiang, Yadong Lu, yelong shen, Pengcheng He, Weizhu Chen, Zhangyang Wang, and Mingyuan Zhou. In-context learning unlocked for diffusion models. In NeurIPS, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.76, + 0.483, + 0.816 + ], + "angle": 0, + "content": "[69] Cong Wei, Zheyang Xiong, Weiming Ren, Xinrun Du, Ge Zhang, and Wenhu Chen. Omniedit: Building image editing generalist models through specialist supervision. arXiv preprint arXiv:2411.07199, 2024. 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.817, + 0.483, + 0.871 + ], + "angle": 0, + "content": "[70] Shaojin Wu, Mengqi Huang, Wenxu Wu, Yufeng Cheng, Fei Ding, and Qian He. Less-to-more generalization: Unlocking more controllability by in-context generation. arXiv preprint arXiv:2504.02160, 2025. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.872, + 0.483, + 0.902 + ], + "angle": 0, + "content": "[71] Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.483, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.12 + ], + "angle": 0, + "content": "Zheng Liu. Omnigen: Unified image generation. arXiv preprint arXiv:2409.11340, 2024. 2, 3, 4, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.121, + 0.905, + 0.147 + ], + "angle": 0, + "content": "[72] Saining Xie and Zhuowen Tu. Holistically-nested edge detection. In CVPR, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.149, + 0.905, + 0.19 + ], + "angle": 0, + "content": "[73] Haofei Xu, Jing Zhang, Jianfei Cai, Hamid Rezatofighi, Fisher Yu, Dacheng Tao, and Andreas Geiger. Unifying flow, stereo and depth estimation. IEEE TPAMI, 2023. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.191, + 0.905, + 0.232 + ], + "angle": 0, + "content": "[74] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv:2406.09414, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.233, + 0.905, + 0.315 + ], + "angle": 0, + "content": "[75] Sidi Yang, Tianhe Wu, Shuwei Shi, Shanshan Lao, Yuan Gong, Mingdeng Cao, Jiahao Wang, and Yujiu Yang. Maniaq: Multi-dimension attention network for no-reference image quality assessment. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1191-1200, 2022. 10, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.316, + 0.905, + 0.37 + ], + "angle": 0, + "content": "[76] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.371, + 0.905, + 0.44 + ], + "angle": 0, + "content": "[77] Fanghua Yu, Jinjin Gu, Zheyuan Li, Jinfan Hu, Xiangtao Kong, Xintao Wang, Jingwen He, Yu Qiao, and Chao Dong. Scaling up to excellence: Practicing model scaling for photo-realistic image restoration in the wild. arXiv preprint arXiv:2401.13627, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.441, + 0.905, + 0.537 + ], + "angle": 0, + "content": "[78] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Ben Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. Transactions on Machine Learning Research, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.538, + 0.905, + 0.566 + ], + "angle": 0, + "content": "[79] Hayoung Yun and Hanjoo Cho. 
Achievement-based training progress balancing for multi-task learning. In ICCV, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.567, + 0.905, + 0.607 + ], + "angle": 0, + "content": "[80] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In ICCV, 2023. 3, 4, 5, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.608, + 0.905, + 0.662 + ], + "angle": 0, + "content": "[81] Yuxin Zhang, Nisha Huang, Fan Tang, Haibin Huang, Chongyang Ma, Weiming Dong, and Changsheng Xu. Inversion-based style transfer with diffusion models. In CVPR, 2023. 2, 3, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.664, + 0.905, + 0.704 + ], + "angle": 0, + "content": "[82] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. What makes good examples for visual in-context learning? In NeurIPS, 2023. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.706, + 0.905, + 0.761 + ], + "angle": 0, + "content": "[83] Canyu Zhao, Mingyu Liu, Huanyi Zheng, Muzhi Zhu, Zhiyue Zhao, Hao Chen, Tong He, and Chunhua Shen. Disception: A generalist diffusion model for visual perceptual tasks. arXiv preprint arXiv:2502.17157, 2025. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.761, + 0.905, + 0.817 + ], + "angle": 0, + "content": "[84] Peng Zheng, Dehong Gao, Deng-Ping Fan, Li Liu, Jorma Laaksonen, Wanli Ouyang, and Nicu Sebe. Bilateral reference for high-resolution dichotomous image segmentation. CAAI Artificial Intelligence Research, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.818, + 0.905, + 0.858 + ], + "angle": 0, + "content": "[85] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In CVPR, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.859, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[86] Yucheng Zhou, Xiang Li, Qianning Wang, and Jianbing Shen. 
Visual in-context learning for large vision-language models. arXiv preprint arXiv:2402.11574, 2024. 4" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.147 + ], + "angle": 0, + "content": "[87] Muzhi Zhu, Yang Liu, Zekai Luo, Chenchen Jing, Hao Chen, Guangkai Xu, Xinlong Wang, and Chunhua Shen. Unleashing the potential of the diffusion model in few-shot semantic segmentation. In NeurIPS, 2024. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.15, + 0.482, + 0.245 + ], + "angle": 0, + "content": "[88] Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Xiangyang Zhu, Fu-Yun Wang, Zhanyu Ma, Xu Luo, Zehan Wang, Kaipeng Zhang, Lirui Zhao, Si Liu, Xiangyu Yue, Wanli Ouyang, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina next: Making lumina-t2x stronger and faster with next-dit. In NeurIPS, 2024. 2, 3, 4" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.367, + 0.108 + ], + "angle": 0, + "content": "Appendix A. Instruction Format" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.116, + 0.484, + 0.253 + ], + "angle": 0, + "content": "In our unified framework, the instruction consists of three parts: (1) layout instruction, which describes the layout of the grid image; (2) task instruction, which specifies the task type; and (3) content instruction, which describes the content of the target image. Fig. 13 illustrates the instructions for concept fusion of style, subject, and layout (Fig. 
13 upper) and image editing with reference (Fig. 13 bottom). The content instruction is omitted for some tasks that provide strong visual cues in conditions, like style transfer." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.268, + 0.471, + 0.286 + ], + "angle": 0, + "content": "Appendix B. Fine-tuning FLUX.1-dev Model" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.294, + 0.485, + 0.611 + ], + "angle": 0, + "content": "Apart from FLUX.1-Fill-dev, we also adapt our method to FLUX.1-dev [33], a common text-to-image generative model. Unlike the infilling model that shares a consistent objective with universal image generation, FLUX.1-dev requires customized modifications to process clean condition images and noise target images. Specifically, after concatenating images in a grid layout like the infilling model, we always keep the region corresponding to the conditions as clean latent embeddings throughout the sampling process. This strategy requires modifications in image sampling because FLUX.1-Fill-dev takes noise latent embeddings as input. Moreover, for the adaLN-Zero block [48], it is critical to calculate the separate mean and shift parameters for the regions of clean conditions and noise target by feeding \\( T = 0 \\) and \\( T = t \\) into the adaLN-Zero, respectively. \\( t \\) indicates the timestep in each sampling step and gradually increases from 0 to 1 along the sampling process. This strategy aligns with the pre-training domain of FLUX.1-dev, where different noise levels correspond to different mean and shift. As shown in Fig. 14, this strategy ensures the visual fidelity." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.628, + 0.367, + 0.645 + ], + "angle": 0, + "content": "Appendix C. Evaluation Metrics" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.653, + 0.325, + 0.669 + ], + "angle": 0, + "content": "C.1. 
Conditioning Generation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.676, + 0.483, + 0.722 + ], + "angle": 0, + "content": "We assess the models from controllability, quality, and text consistency to evaluate image generation quality in conditioning generation and image restoration tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.736, + 0.483, + 0.827 + ], + "angle": 0, + "content": "Controllability. For conditional image generation, we measure the difference between the input conditions and those extracted from generated images. Specifically, we calculate the F1 Score for the cany-to-image task and RMSE for the depth-to-image task. Additionally, for deblurring, we measure the RMSE between original and restored images." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.841, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Generation quality. We measure the Generation quality using FID [23], SSIM, MAN-IQA [75], and MAN-IQA [75]. FID [23] measures the similarity between generated and real image feature distributions. SSIM evalu" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.907, + 0.184 + ], + "angle": 0, + "content": "ates perceptual quality by comparing luminance, contrast, and structural patterns between images. It calculates local patch statistics and combines them into a composite score ranging from \\(-1\\) to 1, with higher values indicating better structural preservation. MANIQA [75] and MUSIQ [30] leverage neural networks to predict image quality scores." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.201, + 0.907, + 0.261 + ], + "angle": 0, + "content": "Text consistency. Leveraging the powerful multi-modal capability of CLIP [49], we also measure the semantic alignment between generated images and text prompts, which reflects how the model follows instructions." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.27, + 0.762, + 0.287 + ], + "angle": 0, + "content": "C.2. 
Subject Driven Generation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.292, + 0.907, + 0.415 + ], + "angle": 0, + "content": "Following DreamBooth [54] and BLIP-Diffusion [36], we measure DINOv2 [47], CLIP-I [49], and CLIP-T scores for the comparison of subject-driven image generation. DINOv2 [47] and CLIP-I scores measure the alignment between the reference subject and generated images through cosine similarity and CLIP score, respectively. CLIP-T measures the alignment between the generated image and the corresponding text prompt." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.423, + 0.665, + 0.438 + ], + "angle": 0, + "content": "C.3. Style Transfer" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.445, + 0.909, + 0.612 + ], + "angle": 0, + "content": "Following StyleDrop [56], we assess the performance of style transfer according to text consistency and style alignment. For text alignment, we measure the cosine similarity between embeddings of generated images and text prompts, where the embeddings are extracted by CLIP [49]. Regarding style consistency, we measure the cosine similarity between embeddings of generated images and style reference. Note that these two metrics should be considered together because the style consistency will reach 1.0 if the model collapses, where the model completely copies style reference as a composite image and ignores text instructions." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.088, + 0.635, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.658, + 0.096, + 0.786, + 0.11 + ], + "angle": 0, + "content": "Layout instruction:" + }, + { + "type": "text", + "bbox": [ + 0.658, + 0.123, + 0.85, + 0.165 + ], + "angle": 0, + "content": "12 images are organized into a grid of 3 rows and 4 columns, evenly spaced." 
+ }, + { + "type": "title", + "bbox": [ + 0.658, + 0.182, + 0.77, + 0.195 + ], + "angle": 0, + "content": "Task instruction:" + }, + { + "type": "text", + "bbox": [ + 0.657, + 0.206, + 0.87, + 0.332 + ], + "angle": 0, + "content": "Each row describes a process that begins with [IMAGE1] white edge lines on black from canny detection, [IMAGE2] Photo with a strong artistic theme, [IMAGE3] a reference image showcasing the dominant object and results in [IMAGE4] High-quality visual with distinct artistic touch." + }, + { + "type": "title", + "bbox": [ + 0.658, + 0.339, + 0.791, + 0.352 + ], + "angle": 0, + "content": "Content instruction:" + }, + { + "type": "text", + "bbox": [ + 0.661, + 0.362, + 0.678, + 0.379 + ], + "angle": 0, + "content": "0" + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.418, + 0.635, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.294, + 0.82, + 0.458, + 0.833 + ], + "angle": 0, + "content": "(a) Concatenated images" + }, + { + "type": "title", + "bbox": [ + 0.658, + 0.442, + 0.786, + 0.456 + ], + "angle": 0, + "content": "Layout instruction:" + }, + { + "type": "text", + "bbox": [ + 0.658, + 0.471, + 0.86, + 0.513 + ], + "angle": 0, + "content": "A 3x3 grid containing 9 images, aligned in a clean and structured layout" + }, + { + "type": "title", + "bbox": [ + 0.658, + 0.533, + 0.77, + 0.546 + ], + "angle": 0, + "content": "Task instruction:" + }, + { + "type": "text", + "bbox": [ + 0.657, + 0.559, + 0.87, + 0.645 + ], + "angle": 0, + "content": "Every row provides a step-by-step guide to evolve [IMAGE1] a reference image with the main subject included, [IMAGE2] an image with flawless clarity into [IMAGE3] a high-quality image." 
+ }, + { + "type": "title", + "bbox": [ + 0.658, + 0.66, + 0.791, + 0.672 + ], + "angle": 0, + "content": "Content instruction:" + }, + { + "type": "text", + "bbox": [ + 0.658, + 0.687, + 0.87, + 0.786 + ], + "angle": 0, + "content": "The bottom-right corner image presents: A glossy gel nail polish bottle. At the edge of a bustling city park, this item rests on vibrant green grass, captured with a subtle bokeh effect as joggers and pets move in the background." + }, + { + "type": "title", + "bbox": [ + 0.682, + 0.819, + 0.852, + 0.833 + ], + "angle": 0, + "content": "(b) Language instructions" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.835, + 0.907, + 0.863 + ], + "angle": 0, + "content": "Figure 13. Examples of language instructions that contain prompts about the layout of the concatenated image, task intent, and content of the target image." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.158, + 0.373, + 0.226, + 0.386 + ], + "angle": 0, + "content": "Condition" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.389, + 0.295, + 0.487 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.369, + 0.373, + 0.412, + 0.387 + ], + "angle": 0, + "content": "Target" + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.389, + 0.496, + 0.487 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.567, + 0.373, + 0.635, + 0.386 + ], + "angle": 0, + "content": "Condition" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.389, + 0.703, + 0.487 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.782, + 0.373, + 0.825, + 0.386 + ], + "angle": 0, + "content": "Target" + }, + { + "type": "image", + "bbox": [ + 0.703, + 0.389, + 0.905, + 0.487 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 
0.495, + 0.288, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.293, + 0.495, + 0.496, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.495, + 0.702, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.703, + 0.495, + 0.905, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.593, + 0.385, + 0.606 + ], + "angle": 0, + "content": "(a) separate mean and shift" + }, + { + "type": "image_caption", + "bbox": [ + 0.619, + 0.593, + 0.791, + 0.606 + ], + "angle": 0, + "content": "(b) unified mean and shift" + }, + { + "type": "image_caption", + "bbox": [ + 0.281, + 0.615, + 0.715, + 0.629 + ], + "angle": 0, + "content": "Figure 14. Effects of separate mean and shift in fine-tuning FLUX.1-dev." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "18" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_origin.pdf b/data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e62c9c1ef3ec4f062fd3a92ee243d517117d2f63 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5572fb787d1e4c58091727b29a4643d83eb73991dd089d3f3fcea18c39df5b4e +size 8441002 diff --git a/data/2025/2504_07xxx/2504.07960/full.md b/data/2025/2504_07xxx/2504.07960/full.md new file mode 100644 index 0000000000000000000000000000000000000000..48812f9cdd942363de465d109a7d579e34f1aa06 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/full.md @@ -0,0 +1,732 @@ +# VisualCloze: A Universal Image Generation Framework via Visual In-Context Learning + +Zhong-Yu Li $^{1,4*}$ Ruoyi Du $^{2,4*}$ Juncheng Yan 
$^{3,4}$ Le Zhuo $^{4}$ Qilong Wu $^{4}$ + +Zhen Li $^{5\dagger}$ Peng Gao $^{4}$ Zhanyu Ma $^{2}$ Ming-Ming Cheng $^{1\dagger}$ + +$^{1}$ VCIP, CS, Nankai University $^{2}$ Beijing University of Posts and Telecommunications + +$^{3}$ Tsinghua University $^{4}$ Shanghai AI Laboratory $^{5}$ The Chinese University of Hong Kong + +Project page: https://visualcloze.github.io + +# Understand + +the task + +![](images/84a4b54e00041364199bdc1376b9bbd24b486ed080d34169fc1d3f7c10d42653.jpg) + +![](images/249c5eba9181e8fcc9090710c206e223e70e8c83341bea0710dcd66874769944.jpg) + +In-context examples + +![](images/fb04b2ab9ff17bcfb7fa35d7453094bf8d14956c8fdf0ab406495435650b43b6.jpg) + +![](images/4393be7c6f29bc43f54a261a63eaa8c2189646433cf43bedd9b0919cfd199fad.jpg) + +Style + + +Subject = + +![](images/75d89f5d8503df369fc39da3705554613610fada9cfa6423518ea4dc32655477.jpg) + +![](images/1698230c67af2bc59f13117325208987cdf1153085e63af2e77e4bcdc8626b1d.jpg) + +![](images/5eb58d3f654a129ad2c34118c2949abb78a7b3313af457599e18176510edc4f3.jpg) + +![](images/2202c6af3c094ac1789278049ef49938d293faf990575f94e378d7aa01bd8828.jpg) + +![](images/aba5d586ab85b4c732671dccc30e8c30f804cf94b08115818770277e80fdc808.jpg) + +![](images/c9cbc9cc46f803711a2f5087bf774fbaf581c69dbcd8e5093a0df31bdda91ddd.jpg) + +![](images/bd0b98157e40f50c61a930bc7c3484210044f7bb4acfee3766b5f1e2a62ce842.jpg) + +![](images/7ff91ae5472dbf0e4306622f6e5faf691a809bce8c60153b9d9f3805b3ed45fb.jpg) + +Dense Prediction + +Pose Estimation + +Fill blank grid + +by reasoning + +# Visual Prompt + +![](images/f7f813c9d62261dca493ea9cea5ef56da08acae1825f427138e1cb653148b7af.jpg) + +![](images/092609907fdeb76319a8b135e6864cbfdaf3f44b0f64eee6b2bc9518ecd538c5.jpg) + +Target + +![](images/a79b31b137b8ac5a139b3dc80326a763e395c05cbeaf2c1fbf7f0ab7a6c5642a.jpg) + +![](images/2db8d9e6ba2c8127992b3a5110e95463524927abf523df959adf98d905d9ac09.jpg) + +Extend to diverse tasks + 
+![](images/4227aefc2e2bdd775eada89a92f8d6dbb11045e2d36aa517d80ff6fac123a2aa.jpg) + +Image Restoration + +![](images/863f9581ed491674421cbc2abe5eca4cd5538d9abd21ab564db8018f6735de92.jpg) + +![](images/9dcabbfd8c75b0c6cdadcff4e12d7bd5baaef8cd00fb64ea0e774437ac5110f8.jpg) + +![](images/d9d6747f04ff9c2858bf3fdc29d545b472e6d70bd11ff524bc2478f78a4d043e.jpg) + +![](images/2592260bdbec76c8a122218f8f01c344087274b81819d1f8c0e434b38c0b1774.jpg) +Subject + Layout + Style + +![](images/940a7dcba50c975c76e446ee20715be0c4ab77ed609eace49e1313f4b4f2fac5.jpg) +Out + Style + +![](images/508cbcf5bd841b99b378aea70c2c4cb285132b5cb2e168a31a3cdf249aa1e100.jpg) + +![](images/24b1c78dac9ec403cbf8363cdcd5e632ebc391d46e27bcb75148fea5ba6868d6.jpg) + +![](images/93f401a3ae1b820383a40ed9b15bea2bef7e24e3bda244669a1a79af432a5fd0.jpg) + +Subject-driven + +![](images/8b4fc23f069f0c170232eae403f1a21e6a887e53a17c4491126b10f013255ff1.jpg) +# +$\therefore m : x = 1$ 或 ${3x} + {4y} + 1 = 0$ + +![](images/2227fd4b48343bbc2e5a42bcd226b45f9dc129a4c514bc1ae1388c098343dfc2.jpg) + +![](images/c8add268e59b84fb865426faadb73b0ea791f4d3904ae87c861f13ec35a054e5.jpg) + +![](images/aec23ed35e291af8c7df29434c10d984b9fadaf5bc6ff18946ad57d77dc73d24.jpg) + +![](images/aaf2c1562237f5ddb14c3eed6b9f102fd9b427dd8f8888b74c7e52557e1dd7fe.jpg) + +![](images/2e9ee140d2a4597e7fa85f3386ba1a0e2949ce6941fb40534e77123b7ea6392e.jpg) +Relighting + +![](images/f9b78ddccec0ccf409f2fc8879fa376edc9d39e8cb08907d81efb9a99c5c6a2c.jpg) +Virtual Try-On + +![](images/ed5f6d948058c554d40aa00b169db869fe0195040f2a10e59f002c94628c2c0a.jpg) + +![](images/9da1aab1b366244e777d1117796b8fce76b9d73519a64f26cd2fc9dbeab6ff3b.jpg) + +![](images/c506e86c12a0d7f207718f07d5028554f851e083963db87677d21e69d797c3e4.jpg) +Style Transfer + +![](images/b28958482ff95ad335836742e9a81e9645dc5376540a879f9995da4107d93fc1.jpg) +ansfer + +![](images/47b133f962d1943ebccc7d7b95526dd22ba979ddb99f83ec1f369a7fafebc9be.jpg) + 
+![](images/ee5e5e4dc0f451c0d87afc70fdfb4e5b425f1d04355975da1e325a69e82d76cb.jpg) +Editing (Add) + +![](images/1dc3854fd5ac3d6dcffbeb4a1d39085a938d5eb202d29657e49a2f6b7d256cda.jpg) + +![](images/1a72a8d71ec0685fd94de7f2a505d77e8c5208a02955aace9b945bd03d575d65.jpg) + +![](images/66ed0ab3c6fbf09c3be50cfee5d84e76b0392f0983e9eb68ce2439a48844dc56.jpg) + +![](images/ef3a55e1c0e0d92f96cdbb5e7bf19885fc97fd1438490113606cf105ff38a722.jpg) +Subject-driven Editing +even Editing + +![](images/ed0aac7d61ace1eba9a61c459cf3c93e45083cb784032d709343f6143259f33c.jpg) + +![](images/7a1fb4d42aae0b909975ed02636653475ab979e238853e1d19e586ee9b214afa.jpg) + +![](images/29e6e033eba6f9c828268ab3603ad2247905ec18441924873e78e01090deabba.jpg) +Figure 1. The top left illustrates our universal image generation framework based on visual in-context learning. Given one query of a specific task, the generative model learns the task by observing a few in-context examples presented as demonstrations. For each task, the generation result is indicated by a red box. 
+ +![](images/ae71def7d7afd6993f212b48147ff76d77f6b67ba677957013ffa307143b088f.jpg) + +![](images/b9e915ffc563ac1292d609bb50db8a3d661d89781b64ad46406c4a4066ae6ec0.jpg) + +![](images/e99f16afe43814fda782234d7d661493d65d0f8193c42181a2fb8646da77dfbb.jpg) + +![](images/1263fd0d3b503ab31990285eaa05a8492e113ec5ee63b7f9025a558be400f16f.jpg) + +![](images/7c4061b0dc13b470d7a84ab421c7b751f3730815bec422d66c175f2230d760b1.jpg) +Multi-View + +![](images/1df33f9215c95c3263f0399bb5306ada5f1da2b9580d1f7258ea97c667a0082c.jpg) + +![](images/a0b9804799dd95f5f5a5f8a400d728989ad5e77cd0039295b88a70967f9e46bb.jpg) +Multi-View + +![](images/8a4ca149908f1c3dc7ae4543913cbbac86677dfbc0b3e6595b8b3db7216b1fb3.jpg) + +![](images/df8c1d05aa538cfaab171fbf8a43d859274becad33105a818a882f24e87d6ac4.jpg) +Visual Prompt + +![](images/4c8e89482096035b9b67bf8d9f75fde004d8898eef458d3627c5b17cc693e0a4.jpg) +Without In-context Example +Visual Prompt + +![](images/a93592ea83277e4116662db6789d092b4506ba785c49b5ef69f3ece170981951.jpg) +Target + +![](images/225435a613094edea23cdf30cd5aef8ff20c0846d6767e1c720739f28dcfdda9.jpg) ++One In-context Example + +![](images/7ffa1029cfcefa94e3cf5bdaf17569e7ed77473937a42d10f5327d8ba5f3baa4.jpg) + +![](images/2aae421c0d97f7367323bd96a3b60c8ad03dff85be0351db327a74916b1c7eb7.jpg) + +![](images/bbde4e344c92cdffbb5d6d627f513b6db53c242132d666b089c56062905f8f21.jpg) +Visual Prompt +face, to generate [IMAGE3] that faces the center of the lens. The last row is: making [IMAGE1] the standing woman the final row is: the woman's frontal face that faces the center of the lens. [IMAGE2] sit down and give the thumbs up. Figure 2. Unseen Tasks : Generalizing to tasks unseen during training via in-context learning. More in-context examples lead to more accurate results. 
+ +![](images/6e7c4b50a411b64ea2fa9b3901aa00c73ad2e08eb154636134429d2e7293b86d.jpg) +Visual Prompt + +![](images/49e8c714009dcb15c30f842353e6790b503d26f066686ad9343c23d792f474af.jpg) +Target + +![](images/9b0e95ce6b5f659443d087b5d3ecac99c7d060cbfb474e9fdbe6bee5a539dfa4.jpg) ++ Two In-context Examples + +![](images/1adeb98c09571df20505a5a4f0305250bc8edd084d989b33f63c66a87587c7b6.jpg) + +![](images/865ed759eb5d007006a5967c548a618725dba0bc159ec85228032aa3ce5813b0.jpg) + +![](images/590911237aefab4740b5c175aafe250be5092b518e279faa3523a69cb26b5770.jpg) + +![](images/2d0bebf1658a821fcc6e82cac71ba626d69b22bdfd41b0163b1654d23b35e58b.jpg) + +![](images/35101d5b91c55b650a4ca2e9d86bb2cb516b3928546e9cc30ced8b9e1acfd56c.jpg) + +![](images/1ced14dcdb0f7655cb23c43ba76afe931b2f069d0003e9406fbb4efecf6621b0.jpg) +Visual Prompt + +![](images/de40888ebddd978640967ce3e0a2098836df789420975c6582e6a62980f7cb49.jpg) +Visual Prompt + +![](images/8cee65b550f76526a0ec36b3d610f2bca88bd275c26b1b73a5aa107579965a84.jpg) +Target + +![](images/87975a69a7cfa15925ae1c6c60a37382b5ca478a877f5b0170fd190aa93a84c6.jpg) +Without In-context Example +Visual Prompt + +![](images/0011099c4f11710feccc82686f0023255f8adccc3a9a19fce321b9d65917a02a.jpg) +Target ++ Two In-context Example + +![](images/82142d68ddec7246b19ed5b8d35074aabe6ba13595fe20130c05a1bb064bb661.jpg) ++ One In-context Example + +![](images/6bbda712bd7907bc495deaf542b4fbea3eb5d4414397d6b8f495eb112c6b6403.jpg) + +![](images/5f64dd49c40006e0644a0d6cb82c0a6b7c54d01053b61838f7c4a4e3da0c8672.jpg) + +![](images/4524ba90197d6faf3924f2fa848409b267d2ba9b74ae737bf113379c1d8f9fb6.jpg) + +![](images/0f2678607e6c892270344d986bc5156a63dca9f44988435e90886ea70b2ba0ed.jpg) +Visual Prompt +Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: making [IMAGE1] the standing woman [IMAGE2] sit down and give the thumbs up. 
+ +![](images/6ef7988092c3f099f7c744d99b5798d8e128ea6928adf080cd75abb614fc6081.jpg) +Target + +![](images/62a0321b2defdca71a8991beaa7c0d9246db575e1936a7f07e2aa3ff4255ef5d.jpg) + +![](images/0a8c11abc1d3585d2309684f2424ffa5cfc40ddeb9c7ebcd550291520024d036.jpg) + +![](images/7725e502055d67d3754bb68f8865a468733e96410a1f407141a16aff82504871.jpg) + +![](images/1f186305b4bd17875edafaef1ce3e6238205a54a85f06cae50a1c0fc8d34d92f.jpg) + +# Abstract + +Recent progress in diffusion models significantly advances various image generation tasks. However, the current mainstream approach remains focused on building task-specific models, which have limited efficiency when supporting a wide range of different needs. While universal models attempt to address this limitation, they face critical challenges, including generalizable task instruction, appropriate task distributions, and unified architectural design. To tackle these challenges, we propose VisualCloze, a universal image generation framework, which supports a wide range of in-domain tasks, generalization to unseen ones, unseen unification of multiple tasks, and reverse generation. Unlike existing methods that rely on language-based task instruction, leading to task ambiguity and weak generalization, we integrate visual in-context learning, allowing models to identify tasks from visual demonstrations. Meanwhile, the inherent sparsity of visual task distributions hampers the learning of transferable knowledge across tasks. To this end, we introduce Graph200K, a graph-structured dataset that establishes various interrelated tasks, enhancing task density and transferable knowledge. Furthermore, we uncover that our unified image generation formulation shared a consistent objective with image infilling, enabling us to leverage the strong generative priors of pre-trained infilling models without modifying the architectures. + +# 1. 
Introduction + +Recent advancements in image generation, propelled by the progress of diffusion models [15, 33, 88], have led to a + +wide range of applications, including image editing [69], style transfer [64, 81], virtual try-on [11, 12], and personalized generation [38, 54], among others. However, these tasks typically require task-specific models, which limit efficiency and scalability for real-world applications. In recent years, there has been growing interest in universal generative models [27, 39, 44], aiming to handle diverse image generation tasks, even unseen ones, within a single unified framework. Despite significant progress, some critical issues remain to be addressed, such as (1) distinguishable and generalizable task instruction, (2) comprehensive task coverage during training, and (3) a unified model architecture. + +An ideal task instruction is crucial for guiding the model to process the desired task effectively. Existing methods primarily rely on language instructions [27, 44] or task-specific tokens [39] to distinguish the task to be performed. However, the complexity of visual tasks and the inherent gap between vision and language modalities make it hard for the model to understand language-only task descriptions, which leads to task confusion [39] and hinders generalization on unseen tasks [35, 71]. Moreover, pre-learned task-specific tokens constrain the model only to handle seen tasks. In contrast, large language models (LLMs) have successfully achieved unified multi-task modeling, partially due to the rise of in-context learning [5], which allows models to adapt various tasks using only a few demonstrations. We aim to replicate the concept of in-context learning in the pure visual modality, where the model learns the desired task directly from a few visual examples as task demonstrations, as shown in Fig. 1 (Left Top). In this setting, in-context learning shows strong potential for universal image generation. 
We summarize four key findings: (1) it supports various in-domain tasks with reduced task ambiguity (Fig. 1); + +![](images/4524b24592705bd243982985dfcf7b75d8aa81c12f80b430ba50f7f3f1856fe8.jpg) +Figure 3. Unseen Tasks: Leveraging in-context learning to unify multiple seen tasks into a single-step unseen task. Left: Unifying the [Depth to Image] and [Relighting] task into a single [Depth to Images with Various Lighting] task. Right: Unifying multiple dense prediction tasks into a joint prediction task. Results without visual context can be found in the appendix. + +![](images/74cf25e75f12bfa369263343b948539488e43676d1cdc73d00248e8230e91f3b.jpg) + +(2) it generalizes to unseen tasks (Fig. 2, Fig. 8); (3) as an unseen strategy for task unification, it can integrate multiple sub-tasks into a single step and generate intermediate results (Fig. 3); (4) it enables reverse generation, i.e., inferring a set of conditions from a given target (Fig. 9). While prior works [1, 3, 4, 43, 66, 71, 82] have also explored in-context learning in vision, they are largely constrained to specific domains (such as dense prediction or style transfer [67, 87]), or simplified generation settings involving only one condition and one target image [43, 60]. + +From the perspective of task distribution, visual tasks are inherently sparse compared to those in natural language processing because task-specific datasets [71, 85] for different tasks have minimal overlap [19, 32, 79]. Such sparse task learning isolates the knowledge of each task and limits the model from learning shared features across tasks. Moreover, the weak correlations between tasks hinder knowledge transfer and adaptability to new tasks. However, existing works in multi-task learning [10, 16, 31, 53] have verified the benefits of overlapping knowledge across related tasks. 
To alleviate the sparsity of visual tasks, we introduce a graph-structured dataset, Graph200K, where each image is associated with annotations spanning five metatasks, i.e., conditional generation [80], IP preservation [76], style transfer [81], image editing [69], and restoration [77]. By combining different conditions, we train the model with a variety of tasks that overlap with each other. Given this highly overlapping and compact task space, our dataset significantly increases task density, allowing the model to learn shared and transferable knowledge more effectively. + +For the architecture design, it is essential to 1) accommodate flexible task formats [27, 35, 71], ensuring seamless in-context learning, and 2) remain compatible with state-of-the-art models [33, 88] to fully leverage their strong generative priors. In this work, we find that the state-of-the-art image infilling model [33] has a consistent objective with our + +in-context learning based universal generative formulation. Specifically, we concatenate all input and output images together, where the objective of a task is to fill the output area. This alignment enables us to build our model upon advanced general-purpose infilling models without additional modifications, achieving powerful universal generation capabilities with minimal data and training costs. + +In this work, we propose a universal image generation framework, VisualCloze, which fine-tunes FLUX.1-Filldev [33] with interrelated tasks sampled from Graph200K to learn transferable knowledge and support visual in-context learning. As the number of in-context examples increases, we observe enhanced performances and reduced task confusion, enabling the model to support a broad spectrum of in-domain tasks, including conditional generation, image restoration, editing, style transfer, IP-preservation, and their combinations. On unseen tasks, the model also shows a certain degree of generalization ability, as shown in Fig. 2. 
In summary, our main contributions are as follows: + +- We propose an in-context learning based universal image generation framework that supports a wide range of indomain tasks and exhibits generalization to unseen ones. +- We design a graph-structured dataset, Graph200K, which constructs a compact task space, enabling flexible online task sampling and promoting the models to learn shared and transferable knowledge across tasks. +- Our unified image generation formulation shares a consistent objective with the state-of-the-art infilling model, enabling exceptional performance through minimal tuning without modifying the structure. + +# 2. Related Work + +# 2.1. Image Generation + +Recent advances in text-to-image generation have achieved remarkable performance, largely driven by the development + +of autoregressive models [41, 58, 78] and diffusion models [2, 13, 15, 18, 24, 40, 42, 48, 51]. Among these, rectified flow transformers [15, 17, 33, 88] have shown great training efficiency and overall performance. Building on these foundational models, diverse applications have emerged, such as conditional generation [80], style transfer [64], and personalized generation [38]. More recently, universal models that address various tasks [35, 44, 83] have been explored. For example, unified models like OmniGen [71] leverage large vision language models to consolidate multiple tasks into a single framework. Similarly, UniReal [9] unifies image generation tasks as discontinuous video generation. However, they still face issues such as over-reliance on language instructions, isolation and sparsity of visual tasks, and architecture design accommodating flexible task formats. To address these issues, we propose a universal image generation framework that unifies generation tasks as image infilling. 
Through visual in-context learning and our Graph200K dataset that constructs a denser task space to learn transferable knowledge, our method alleviates ambiguity to support a diverse set of in-domain tasks and generalizes to tasks unseen during training. + +# 2.2. Visual In-context Learning + +Along with the emergence of large language models, such as GPT-3 [5], in-context learning [14] has been an effective approach to allow the language model to understand and perform complex tasks given a few demonstrations. Early works [21, 22] in vision modality propose image analogies to create an image filter from examples automatically. In recent years, leveraging inpainting model [3, 4, 82], masked image modeling [43, 66, 67], or vision-language model [1, 86], visual in-context learning is proposed to handle more tasks. However, they mainly focus on dense prediction [55, 59, 87] or visual understanding [63]. OmniGen [71] also leverages in-context learning to generalize to unseen domains, e.g., segmenting unseen concepts when the model has learned the segmentation task during training. However, it mainly focuses on simple tasks of dense prediction, and the gap between the unseen and training domains is still limited. Some recent works [34, 43, 60, 68] extend visual in-context learning to image generation, but they are still limited by simple tasks such as conditional generation and dense prediction. Moreover, the sparsity of visual tasks makes it difficult for models to learn transferable and overlapping knowledge across tasks, limiting the generation ability of in-context learning. In contrast, we introduce a graph-structured dataset that supports interrelated tasks and thus constructs a more dense task space, promoting the model to learn shared and transferable knowledge and enhance its adaptability. + +![](images/1732fc2ed7efbba343de0423288c803466a9f0e9b719b20d684c15538ef8510e.jpg) +Figure 4. Illustration of the proposed Graph200K dataset. 
Each image is annotated for five meta-tasks, i.e., conditional generation, image restoration, image editing, IP preservation, and style transfer. Using these tasks, we can combine a wide range of complex tasks, such as the bottom of the figure. + +# 3. Dataset + +Recent works [26, 44, 71] have made great progress in unified image generation. However, their generalization to unseen tasks remains highly limited. We partially attribute this issue to the sparsity and isolation of visual tasks, hindering the model from learning shared features across tasks and handling unseen ones. Moreover, weak correlations between tasks further hinder knowledge transfer, restricting the adaptability of models. Therefore, increasing task density or strengthening task inter-relations helps improve the generalization ability of models via a compact task distribution. In this paper, we take the Subject200K [61] dataset as a starting point and construct our Graph200K dataset by augmenting each image with 49 types of annotations spanning five meta-tasks. This enriched annotation space enables flexible construction of a wide range of related tasks by sampling and combining arbitrary subsets of annotations across different meta-tasks, as illustrated in Fig. 4. + +# 3.1. Graph-Structured Multi-Task Dataset + +In natural language processing, tasks overlap significantly, facilitating strong cross-task learning ability. In contrast, visual tasks are inherently distinct, posing challenges for vision models to achieve similar generalization ability via + +instruction tuning. To ease this issue, we introduce a Graph-Structured Multi-Task Dataset. As illustrated in Fig. 4 (a), given a text-to-image dataset, each image is treated as the central node of a graph, around which diverse task annotations are constructed, including those for various spatial conditions, degradations, image editing results, reference image for IP-preservation, and style transfer with various reference styles. 
The construction process for each task pair is detailed in the next section.

As shown in Fig. 4, each task annotation forms a bidirectional edge with the image. Thus, the graph is strongly connected, which means that for any two nodes, bidirectional paths exist between them. In other words, a generation task can be formulated as a path within the graph. The nodes along a path (except the end node) serve as condition images, which is analogous to the question in instruction fine-tuning, while the target image (the end node) plays the role of the answer. Specifically, there are 49 types of nodes in our Graph200K, and we sample up to 134 highly overlapping tasks, making the model learn more compact and shared representations across tasks. Moreover, it enriches the diversity and flexibility of our instruction fine-tuning data. For example, the path reference $\rightarrow$ editing $\rightarrow$ image corresponds to the task of image editing with reference, as shown in Fig. 4 bottom.

# 3.2. Dataset Construction

For convenience, we inherit subject-driven data from the Subjects200K [61]. Additionally, 32 different degradations are applied online to the images to acquire restoration data. We summarize the data construction methods in this section for the remaining three tasks.

Conditional generation. Each image is paired with 12 distinct conditions generated by specialized models, including canny edges [6], HED edges [72], Hough lines [20], semantic segmentation maps [37], depth maps [74], surface normal maps [73], and human keypoints [7], following ControlNet [80]. This work extends the conditions by incorporating SAM2 [50] masks, foreground segmentation, and open-world boxes and masks. The foreground segmentation, derived from the RMBG [84], supports diverse tasks such as inpainting and foreground extraction. 
Open-world bounding boxes are generated through the grounding caption capability of Qwen2-VL [65], which are processed using SAM2 [50] to produce corresponding masks. + +Style transfer. We transfer the style of images according to reference in both semantic-variant and semantic-invariant settings. Specifically, the semantic-invariant transfer adopts InstantStyle [64] to preserve the semantic content, while the semantic-variant transfer relies on FLUX.1-Redux-dev [33], using the style embeddings and depth as + +conditions. For each image, we randomly generate five stylized versions. Mixing the two tasks pushes the model to follow the in-context examples better to avoid ambiguity. + +Image editing. We design two types of editing tasks, including background-variant and background-invariant editing. The background-invariant editing begins with localizing the subjects. Then, we leverage a large vision-language model, Qwen2-VL [65], to modify the image caption with a new object that replaces the original subject. The image, with the subject masked, is subsequently processed by the FLUX.1-Fill-dev [33] inpainting model to integrate the alternative object into the masked region. The above operation is repeated five times to enrich the dataset. For background-variant editing, the difference lies in the last step, which utilizes FLUX.1-Redux-dev [33] with depth as the condition and the modified caption as the text prompt. + +# 3.3. Other Data + +To further expand the range of tasks and enhance the generalization ability of models, we incorporate several open-source datasets during training, including VITON-HD [11] for virtual try-on and PhotoDoodle [28] for artistic image editing. For image editing tasks, we also extend the dataset with OmniEdit [69]. Specifically, two sub-tasks, i.e., object addition and removal, are used for training. 
The other editing tasks, such as attribute modification and environment change, are treated as unseen tasks to assess the generalization ability of the trained model. Furthermore, we leverage a portion of high-quality internal data, covering tasks of the drawing process [62] and multi-view generation [29]. + +# 4. Method + +This paper identifies the core challenges in building a universal image generation model, including the need for a clearly defined and generalizable task formulation, visual task sparsity, and the lack of a unified framework for multi-task learning. In the previous section, we addressed the issue of task sparsity by constructing the compact Graph200K dataset. Sec. 4.1 introduces visual in-context learning as the ideal paradigm for universal task formulation. Afterward, Sec. 4.2 considers the image infilling model a unified multi-task framework, achieving strong generalization capabilities with minimal cost. + +# 4.1. Visual In-context Learning + +Language instructions are usually used to specify the generation definition to handle multiple visual generation tasks with a single generative model. However, due to the gap between vision and language, the text comprehension ability of image generation models remains limited. This issue leads to task confusion [39] in existing universal generative models and weak generalization to unseen tasks. Inspired + +![](images/1358ebd9de822d6bba28037c85b8d86380df5a48d034a0f61ea62114365d94fe.jpg) +Figure 5. Concatenating images when applying position embeddings. The $L$ images within $C$ in-context examples and the query are first concatenated horizontally. Then, these concatenated rows are concatenated temporally to handle mismatched aspect ratios. + +by the success of few-shot learning on large language models [5], we recognize that visual context may serve as a more friendly task instruction for visual generative models, given their superior visual understanding capabilities. 
+ +Therefore, in this paper, we re-propose visual in-context learning to build a universal and generalizable image generation system. For the sake of description, here we assume the image input-output of arbitrary conditional generation task as a query consisting of $L - 1$ condition images and a blank target $\varnothing$ to be completed by the model, i.e., $X = \mathrm{concat}(\{x_1,\dots ,x_{L - 1},\emptyset \})$ . In Sec. 5.1, we demonstrate that our method can be extended to more general scenarios, where it can generate images at arbitrary positions and in any quantity rather than just the single image at the end of the query. During training, we randomly provide up to $C$ in-context examples, each containing $L$ images as the query. This strategy ensures the generalization ability of models across different numbers of in-context examples. In our experiments, we show that providing in-context examples as task demonstrations not only helps alleviate task confusion and boost model performance across in-domain tasks [39], but also enhances the generalization ability on unseen tasks. + +# 4.2. Unified Multi-task Framework + +Unlike previous visual in-context learning methods that primarily focus on scenarios with a single image condition and a single context [43, 60], in this work, we aim to construct a unified framework capable of handling varying numbers of conditions and contexts, allowing for flexible adaptation to diverse tasks. For ease of description, we first assume all images processed by the model share the same size, $W \times H$ , and we extend to the scenario with mismatched aspect ratios at the end of this section. In this way, given $C$ in-context examples and the query, each containing $L$ images, all images can be concatenated into a complete grid-Layout image + +with a size of $(L\times W,(C + 1)\times H)$ . Then, the model can complete a task by infilling the target grids based on the surrounding context, akin to solving visual cloze puzzles. 
Therefore, we build our unified framework, VisualCloze, based on the general image infilling architecture capable of handling multiple resolutions.

Consistent with common diffusion-based infilling model designs, our model can be formulated as follows:

$$
\hat {X} = f (X \mid T, M), \tag {1}
$$

where $X$ is the concatenated image, with the last grid left blank, $T$ is the language instruction, $M$ is the mask condition, and $\hat{X}$ represents the infilled result. The mask $M$ is a binary matrix with the size of $(H \times (C + 1), W \times L)$ :

$$
M (i, j) = \left\{ \begin{array}{l l} 1 & \text {if } i \in [ H \times C, H \times (C + 1)) \\ & \text {and } j \in [ W \times (L - 1), W \times L), \\ 0 & \text {otherwise}, \end{array} \right. \tag {2}
$$

where $M(i,j) = 1$ indicates that the pixel will be masked and generated by the infilling model. Equ. (2) masks the region in the last row and column, i.e., the target image. During training, we also randomly mask one of the first $L - 1$ grids with a probability of 0.5, promoting reverse generation shown in Sec. 5.1. For the inference stage, we can crop $\hat{X}$ to obtain the target image easily.

Aligned optimization objective. A key benefit of this design is that our VisualCloze formulation shares a highly consistent objective with general image infilling models without architectural modifications or explicit input conditions. This consistency allows us to directly fine-tune advanced image infilling models using the newly constructed dataset while maximizing the utilization of the prior knowledge of foundation models. In contrast, existing task-specific models often require introducing additional learnable modules [38, 69] or adapting to extra condition inputs [61], which may compromise the native capabilities of the model.

Language instructions. 
Note that the design of language instruction is also necessary for VisualCloze because it is responsible for defining the grid image layout, describing the caption of the image to be generated, and specifying the task intent when in-context examples are unavailable. In our unified framework, the instruction consists of three parts: (1) layout instruction, which describes the $(C + 1)\times W$ layout of the grid image; (2) task instruction, which specifies the task type; and (3) content instruction, which describes the content of the target image. The details about the instructions are available in Appendix A. By restructuring the three components $X$ , $T$ , and $M$ in Equ. (1), we achieve a unified multi-task framework for image generation with the general image infilling paradigm and support in-context learning. + +Positional embedding. In the preceding section, all images are concatenated into a grid layout image and we can apply positional embedding (i.e., RoPE [57]) on this large image. However, a potential limitation lies in composing a grid image from in-context examples with varying aspect ratios. To overcome this issue, we leverage the 3D-RoPE in Flux.1-Fill-dev to concatenate the query and in-context examples along the temporal dimension, as shown in Fig. 5, effectively overcoming this issue without introducing any noticeable performance degradation. + +# 4.3. Implementation Details + +We use FLUX.1-Fill-dev [33] as our foundation model, considering its outstanding performance among open-source image infilling models. In this work, LoRA [25] is chosen to fine-tune the model instead of fully fine-tuning it to reduce training costs and preserve the capabilities of the foundation model. The resulting LoRA can also be fused with other LoRAs in the community, enabling more widespread applications. Specifically, we set the rank of LoRA as 256. The model is tuned for 20,000 iterations with an accumulated batch size of 64 on $8 \times \mathrm{A}100$ GPUs. 
We employ the AdamW optimizer with a learning rate of $1e^{-4}$ . Following FLUX.1-Fill-dev, we incorporate the lognorm noise strategy with dynamic time shifting. During training, the number of in-context examples is set up to 2 (i.e., $C$ as defined in Sec. 4.2), while $L$ , the number of images involved in a task, varies between 2 and 4 in the Graph200K dataset. During inference, the number of in-context examples can be generalized to a larger number. To balance computational efficiency, each image is resized to the area of $384 \times 384$ or $512 \times 512$ before concatenating them into a grid layout. High-resolution outputs can be obtained in practical applications through simple post-up-scaling techniques [45]. + +# 5. Experiments + +# 5.1. Qualitative Analysis of In-context Learning + +This section presents a series of experiments demonstrating the effectiveness of in-context learning across different tasks, especially those unseen during training. Based on our extensive experiments, we summarize four key findings that highlight the role of in-context learning. + +# In-Context Learning Findings 1 + +In-context learning can mitigate task confusion for seen tasks. + +Task ambiguity on seen tasks. The model occasionally experiences task confusion, failing to interpret the intended objective accurately, especially on dense prediction tasks. In-context learning effectively alleviates this issue + +![](images/8f626ef355e0acb77913acad04167e89c0c0c4c5f84de1f1c391827b8f9846db.jpg) +(a) Image to Pose + +![](images/89478554e2624c03002c38dc0b0b3797005ab2fb58fff9224ad2b02c4a50e563.jpg) + +![](images/1ee7c7cb4e3fa3c1f80c8307b2f125ca56115352f10a661f2a448b98d7733511.jpg) +(b) Image to Depth +(c) Image to Edge + +![](images/799d110235e6099c3b897aad35734e877d65549aefaf1c26cba0f9dbc70c90cf.jpg) +(d) Normal to Image +Figure 6. In-context learning mitigates the task ambiguity in seen tasks. We show three results using different initial noises. 
+ +by providing task-specific demonstrations. For example, in Fig. 6 (a) and (c), the model may produce noisy results without in-context examples in pose estimation and edge detection, while increasing the number of in-context examples enhances the performance and stability. In depth estimation shown in Fig. 6 (b), in-context examples also improve the accuracy when the model originally makes inaccurate estimates, especially in distant areas. Additionally, in some tasks like conditional generation, we note that the model can generate satisfactory results stably even without in-context examples, as shown in Fig. 6 (d). However, the quantitative comparison in Tab. 1 still shows that using in-context learning can further improve the accuracy of task completion. + +# In-Context Learning Findings 2 + +In-context learning supports generalization to unseen tasks, where providing more in-context examples could lead to more accurate generation. + +Generalization on unseen tasks. Beyond mitigating task confusion, in-context learning also enables the model to generalize to tasks unseen during training. Fig. 2 has shown the model can successfully generate frontal faces from side-view images and transfer editing instructions [8] through in-context learning, even though they are not encountered during training. Here, we present additional examples of unseen tasks. For instance, although the model is trained exclusively on image editing tasks involving object addi + +![](images/4ea29b7e8c65442528e8fbcb8620b04d0255186bc45e1f44ad9998ad16841a57.jpg) +Task Prompt: In each row, a logical task is demonstrated to achieve [IMAGE2] a high-aesthetic image based on [IMAGE1] an aesthetically pleasing photograph. Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: change the setting to a winter scene. 
<\editing instruction>

![](images/a3d07fda5632ede382d0cef080fcaa8eead3e5397ac707336b1f1b0e9833199d.jpg)

![](images/987f48cc4fce95f082ca19623a70e2a6cdfc2c6aaacc2e358a28b72a09978326.jpg)
Task Prompt: In each row, a logical task is demonstrated to achieve [IMAGE2] a high-aesthetic image based on [IMAGE1] an aesthetically pleasing photograph. Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: turn the color of sunglasses to green.

![](images/ff981a1756b7d4668df52429e2d3439d06ab30300cd9a9f1a69f32855e7aac24.jpg)

![](images/15e8bf13c9a2330b000578b4431ba8c8b856240daefb6265a36cbff561e2c67d.jpg)
Figure 7. Unseen Tasks: Although the image editing tasks seen by the model are only about object addition and object removal, it can still generalize to other types of editing tasks, such as environment modification (Left) and attribute transformation (Right), through in-context learning. More unseen tasks are shown in Fig. 2.

![](images/a8cebf66282c6a6d1bb8700a95d9f356e6055f9f9410deb217518b384a3a6b78.jpg)
Figure 8. Unseen Tasks: VisualCloze is capable of performing multi-subject driven generation [70], even though the model was only exposed to single subject-driven generation tasks during training. Best viewed by zooming in.

tion and removal, it still generalizes to other types of editing tasks, such as environment changes and attribute modifications, as shown in Fig. 7. Furthermore, as demonstrated in Fig. 8, the model, trained solely on single-subject generation, can generate images preserving identities of multiple subjects. These results highlight that in-context learning is an effective guidance mechanism, enabling adaptation to novel tasks without retraining.

# In-Context Learning Findings 3

In-context learning enables task unification, an unseen strategy that consolidates sub-tasks into a single step and generates intermediate results.

Multi-task consolidation. 
Meanwhile, we also find that through in-context learning, we can consolidate multiple tasks into a single execution step, which can be viewed as another form of unseen task. Fig. 3 has shown two examples, where we 1) merge conditional generation and relighting shown on the left and 2) perform depth estimation, surface normal estimation, and edge detection simultaneously shown on the right. Similarly, Fig. 11 illustrates how we can combine multiple conditions for conditional generation to achieve finer control. For instance, generating a portrait based on keypoints provides only rough information about the location and body pose. In such cases, contour conditions can be used to control the attributes of other visual elements. + +# In-Context Learning Findings 4 + +Different in-context learning examples lead to varying effects, where examples that can better convey mission intent can achieve better and more stable generation. + +Varying effects of different in-context examples. Following prior works [46, 52] on the prompt selection, we also find that different in-context examples could impact the generation quality. Specifically, it is crucial that in-context examples provide correct and strong guidance about the task intention. For example, as shown in Fig. 10 (left), when the side faces are more towards the front than in Fig. 10 (right), the success rate of correctly generating frontal faces has dropped dramatically. 
+ +![](images/e39dbb03405300be8ff7301a386eae5438f35df95ee9616ca7db3a89cd56f42c.jpg) +Two In-Context Examples + +![](images/de94d01ad7f947dfba9731be7dcb4d5870a68b125e991e5c8ac5a7a4d8c2806a.jpg) + +![](images/d394f8d589edbf416c6d4cc2f58660a637f157bfe6a4370510efd32b80073ad3.jpg) + +![](images/4ee44494de36fcbc77ba11e187af703a9e635c2d744149ecf1d844c025abc15e.jpg) + +![](images/fa356cf4486c8e710d3cd2b102a59b17210dd2dd3bf6f93b711f8c0981a2c386.jpg) + +![](images/e7728427762ce951e655fcb0171f69c18b5b831a8664b2f78df2e160c0273f98.jpg) + +![](images/89597e91d21602ff6a99bb7a814a9b6a7ae72aa4a4b9109d0daebd79103fa3bf.jpg) +Task Prompt: In each row, a method uses[IMAGE1] gray-shaded depth map with distinct edges, [IMAGE2] Artistically rendered content for generating [IMAGE3] High-definition picture in a unique art style. + +![](images/85382c353d3a3f90395a83ff0e3ae47e130ef00c2047f726cee366e12d0254f9.jpg) + +![](images/42c0b7385159af3b1c2f9afba48f8e34b14069b6109c13e3b2601e3948a070c1.jpg) + +![](images/498e06995dd22674b4ab75b8ab7f3a35d95bd305b7e863f4d09253954c32a39e.jpg) + +![](images/62c557f0ce9f35dcb3cc7754d95c24e511eefd8b42b59b819beda1a5208aa49f.jpg) + +![](images/2b4a07349a76312f2d32216d6c874a02a3535c0ad37c2908e984eb13cb6b7287.jpg) + +![](images/f1b73d86659bf48d8dddbe2eb30df4180c1a1c124db2fc18321bd544c2857b04.jpg) + +![](images/7fb4c0fcd85036ac8c873ead412249e9d545ed172f14473177f6964763a4cab4.jpg) + +![](images/2990a4bc71a56d8004954f8fd263cee7d26ac3fa3581635ac306f3f80eda9c2d.jpg) + +![](images/14138013e9b9475875a02acdbd4b44acb2fbf1da5f2bf9232c5038dd69054d61.jpg) + +![](images/0a0b70f5913e7a1add33c44846032d35427d7d7d84ba92ea0890c986c695058a.jpg) + +![](images/f76662e5dc81ba0b4ab6e409674dd2e1e3a76aed9e84f2152cce4ee9785b542b.jpg) +Task Prompt: Every row demonstrates how to transform [IMAGE1] an image with vivid details into [IMAGE2] gray-scale depth map with clear object boundaries, [IMAGE3] rgb normal map for bump mapping effects, [IMAGE4] soft-edged map from hed detection through a 
logical approach. + +![](images/014dfd4d96816ceef7a915c7b8d206169fb9cffce49a5fdf83d9b968c33943a9.jpg) + +![](images/3a00376ddd05ce1a246de4058ad55d3b6f886ddb698692c6e84a05e3651aa199.jpg) + +![](images/22dae11fb3a08b4f3574437e9a2aa7294c551e476b059191448618fdacb60f8e.jpg) + +![](images/e26d101199eaf66301f550024e7b7339d17cc4b47263a479a79c314e8bd86ac4.jpg) +Figure 9. Unseen Tasks: Through in-context learning, we can perform reverse generation from targets to conditions. For example, (a) decomposing the layout and style from a stylized image and (b) inferring the image, depth, and surface normal simultaneously from an edge map, which is the reverse task of Fig. 3 (Left). + +![](images/980324990f838bb09af21c545c77b6a3c430fbecb9f7cb3a9d273e98971dd01f.jpg) + +![](images/0b92ba007fe2e0cfea911db6cb5b8efc794bf013ec60d64dbfbae9c61a97329d.jpg) +Figure 10. Illustration of the impact of different in-context examples on in-context learning. In the second example on the left, the left and right faces are too biased towards the front, so they do not show the core goal of the task intention. + +![](images/db55729e9d4318e9841efd8e93703898949c599ced5c6934a32bdbe22fd9345e.jpg) + +# In-Context Learning Findings 5 + +In-context learning can guide bilateral generation, even for the reverse process that is unseen during training. + +Bilateral generation. In addition to generating the target from a set of given conditions, our model also shows the capability of reverse generation, i.e., inferring the underlying conditions from the target. Although our model has randomly treated one condition image as the target when + +![](images/80e71d879c8601d4e472bffb296a35879303834a95416596e2faa6f860bb7464.jpg) + +![](images/35aef2ef545a9a8891b9ac59dd5f26762d506a0b5d07236fa202dbfa634040af.jpg) + +![](images/c3a5f4d20b1af78021b3d3cd67f5d643151115213ced5cfbaf30a97185d7c53f.jpg) + +Figure 11. Unseen Tasks: Unseen combinations of multiple tasks. 
For conditional generation, we integrate multiple conditions to achieve more precise control. More examples are shown in Fig. 3. +![](images/f2d484af116d0c9a2212612f63e0c234e814fabe0c58948af994a5cc1b020c38.jpg) +Task Prompt: Every row demonstrates how to transform [IMAGE1] human pose with colored lines for bone structure and [IMAGE2] canny map with sharp white edges and dark into [IMAGE3] a visually striking and clear picture through a logical approach. + +![](images/28630a84114fa8d401e0e3eaea7021dcfa6d6f65411c23b243684d03f78786c9.jpg) + +![](images/93b3d7c15a34d4cea9903e6ba78d973fed980a709027e508c9d31569037fbc3e.jpg) + +training as described in Sec. 4.2, it can generalize to a more challenging and unseen setting during inference, i.e., inferring all conditional images from only the target image. For instance, as illustrated in Fig. 9 (left), the model can reverse-engineer both the original and the style reference images given a stylized image, demonstrating the ability to disentangle the content and style representations. Similarly, as shown in Fig. 9 (right), the model can generate the corresponding real image, depth estimation, and surface normal estimation from an edge image, representing the inverse task of Fig. 3 (left). The ability to perform such + 
ConditionMethodContextControllabilityQualityText Consistency
F1 ↑RMSE ↓FID [23] ↓SSIM ↑MAN-IQA [75] ↑MUSIQ [30] ↑CLIP-Score [49] ↑
CannyControlNet [80]0.13-46.060.340.3145.4534.10
OminiControl [61]0.47-29.580.610.4461.4034.40
OneDiffusion [35]0.39-32.760.550.4659.9934.99
OmniGen [71]0.43-51.580.470.4762.6633.66
Oursdev00.39-30.360.610.4861.1335.03
Oursfill00.35-30.600.550.4964.3934.98
Oursfill10.36-31.340.550.4964.1234.96
Oursfill20.36-31.150.560.4964.0834.85
DepthControlNet [80]-23.7036.830.410.4460.1734.49
OminiControl [61]-21.4436.230.520.4460.1834.08
OneDiffusion [35]-10.3539.030.490.4960.4934.71
OmniGen [71]-15.0786.080.260.4964.9029.72
Oursdev0-25.0642.140.530.4658.9534.80
Oursfill0-10.3133.880.540.4864.8535.10
Oursfill1-9.9134.440.540.4964.3234.95
Oursfill2-9.6834.880.540.4864.2934.89
DeblurControlNet [80]-37.8253.280.490.4561.9233.80
OminiControl [61]-19.7026.170.850.4560.7034.53
OneDiffusion [35]-------
OmniGen [71]-------
Oursdev0-25.0356.760.740.3846.6833.52
Oursfill0-26.5340.590.740.4659.6234.56
Oursfill1-25.8736.930.760.4861.5834.82
Oursfill2-25.5736.280.760.4861.7734.82
+ +Table 1. Quantitative comparison on conditioning generation and image restoration. The methods that train a specialist for each task are marked as gray color. Except for these methods, the best method is bolded, and the second best method is underlined. + +
MethodContextDINOv2CLIP-ICLIP-T
OminiControl [61]73.1787.7033.53
OneDiffusion [35]73.8886.9134.85
OmniGen [71]67.7383.4334.53
Oursdev078.0587.6835.06
Oursfill080.4189.6335.16
Oursfill179.3389.2235.02
Oursfill280.3289.3635.01
+ +reverse tasks highlights the flexibility and robustness in understanding complex relationships between different types of image representations. + +# 5.2. Main Results + +We compare our method with universal generative models, including OmniGen [71] and OneDiffusion [35], as well as specialized models, such as ControlNet [80] and Omni- + +Table 2. Quantitative comparison for subject-driven image generation. We report clip scores on text alignment and style consistency. Specialists are shaded in gray. Among the remaining methods, the best is emphasized in bold, while the second best is underlined. + +
text↑image↑
InstantStyle [64]0.270.60
OmniGen [71]0.270.52
Oursdev0.300.53
Oursfill0.290.55
+ +Table 3. Quantitative comparison for style transfer. We report CLIP scores on text alignment and style consistency. The specialists are indicated in gray. Among the others, the top-performing one is highlighted in bold, and the second best is underlined. + +Control [61]. The details of the evaluation metrics are provided in Appendix C. Additionally, we fine-tune FLUX.1-dev [33] using the same settings as FLUX.1-Fill-dev for comparison and refer to the tuned models as Oursdev and Oursfill. The details of Oursdev are shown in Appendix B. + +For conditional generation and image restoration, we evaluate the models based on three criteria, i.e., controllability, visual quality, and text consistency, following the evaluation approach of OminiControl [61]. As shown in Tab. 1, our framework demonstrates comparable controllability to existing universal methods while achieving superior visual quality and text consistency. Compared to spe + +![](images/66de4842ae368391271cd943215047905ba91ce955a55fbb50f891d087e2be07.jpg) +Figure 12. Comparison between Flux.1-dev (Oursdev) and Flux.1-Fill-dev (Oursfill). + +cialized methods, our model performs on par with the best results and even outperforms them on the depth-to-image. + +In the style transfer task, we measure text consistency and style alignment using the CLIP [49] model. As reported in Tab. 3, our method outperforms OmniGen [71] by $2\%$ and $3\%$ in text alignment and style consistency, respectively. Even when compared with InstantStyle-Plus [81], a specialized model, we achieve a $2\%$ improvement in text consistency, with only a slight decrease in style alignment. + +Furthermore, we evaluate the models on subject-driven image generation and report semantic alignment using the DINOv2 [47], CLIP-I [49], and CLIP-T [49] scores. Across all these metrics, our method consistently delivers improvements, as shown in Tab. 2. 
For example, compared to the specialized model OminiControl [61], we achieve improvements of $7.15\%$ , $1.66\%$ , and $1.48\%$ in these three scores. + +Advantages of the infilling model. Our method (Oursfill) is built on FLUX.1-Fill-dev [33], which shares the same objective as our unified image generation framework. To verify its effectiveness, we also fine-tune FLUX.1-dev [33] (Oursdev) using identical settings. Unlike Oursfill, which requires no modifications, Oursdev necessitates model adaptations for universal image generation, as shown in Appendix B. Despite its simplicity, Oursfill achieves superior performance across multiple tasks. + +As shown in Tab. 1, $\text{Ours}_{\text{dev}}$ achieves a higher F1 score than $\text{Ours}_{\text{fill}}$ in the canny-to-image generation. However, in other tasks, $\text{Ours}_{\text{fill}}$ demonstrates a significant advantage. For instance, in the depth-to-image generation, $\text{Ours}_{\text{fill}}$ reduces RMSE from 25.06 to 10.31. In the deblurring task, $\text{Ours}_{\text{fill}}$ achieves superior quality by lowering RMSE while maintaining a higher SSIM. In subject-driven image generation, Tab. 2 shows that $\text{Ours}_{\text{fill}}$ consistently outperforms $\text{Ours}_{\text{dev}}$ . Additionally, in semantic-invariant style transfer, $\text{Ours}_{\text{fill}}$ delivers comparable performance to $\text{Ours}_{\text{dev}}$ , as shown in Tab. 3. + +Fig. 12 presents a visual comparison, where Oursfill demonstrates clear advantages over Oursdev. Notably, in the depth-to-image generation, images produced by Oursdev frequently exhibit diagonal streak artifacts, which significantly degrade visual fidelity. Considering the advantages in performance, visual quality, and architectural efficiency, Oursfill stands out as the superior model. + +Quantitative comparison on in-context learning. Here, we further analyze the impact of in-context learning on seen tasks. Tab. 
1 demonstrates the impact of in-context learning on different image generation tasks. Under the canny condition, our method without in-context examples achieves an FID of 30.60, which improves to 31.15 with two in-context examples. When conditioned on depth, the RMSE decreases from 10.31 to 9.68 as the number of in-context examples increases, indicating enhanced structural consistency. Similarly, in the deblurring task, RMSE decreases from 26.53 to 25.57, reflecting improved fidelity to the original content. These results highlight in-context learning as an effective guidance mechanism, enabling the model to better align with the task intent. + +# 6. Limitations + +While our model demonstrates strong stability across most in-domain tasks, it still exhibits some instability in specific tasks, such as object removal. This limitation suggests that the performance is sensitive to certain task characteristics. Additionally, the stability of the model on unseen tasks is still insufficient. Apart from the difficulty of the task and the difference with seen tasks, ambiguous in-context examples may also lead to less stable results, as discussed in Sec. 5.1. + +# 7. Conclusion + +In this work, we propose VisualCloze, a universal image generation framework that addresses key challenges in existing methods, including generalizable instruction design, appropriate task distributions, and unified architectural design. Rather than relying solely on language-based instructions to convey task intent, we re-propose visual in-context learning, enabling the model to learn tasks from a few demonstrations. This approach improves generalization to unseen tasks and reduces task ambiguity. To overcome the sparsity of visual task distributions, which limits the learning of transferable knowledge, we construct Graph200K, a graph-structured dataset that establishes interrelated tasks. 
In this compact task space, the model is promoted to learn transferable representations and improve adaptability. Meanwhile, we identify the consistent objective between image infilling and our universal generation formulation, allowing us to seamlessly adapt general-purpose infilling models for universal generation without + +architectural modifications. Experimental results show that our approach supports a diverse set of in-domain tasks using in-context learning while demonstrating strong generalization to unseen tasks. + +# References + +[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andrew Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning. In NeurIPS, 2022. 3, 4 +[2] Michael Samuel Albergo and Eric Vanden-Eijnden. Building normalizing flows with stochastic interpolants. In ICLR, 2023. 4 +[3] Ivana Balazevic, David Steiner, Nikhil Parthasarathy, Relja Arandjelovic, and Olivier J Henaff. Towards in-context scene understanding. In NeurIPS, 2023. 3, 4 +[4] Amir Bar, Yossi Gandelsman, Trevor Darrell, Amir Globerson, and Alexei A Efros. Visual prompting via image inpainting. In NeurIPS, 2022. 3, 4 +[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. *NeurIPS*, 2020. 2, 4, 6 +[6] John Canny. A computational approach to edge detection. IEEE TPAMI, 1986. 5 +[7] Z. Cao, G. Hidalgo Martinez, T. Simon, S. Wei, and Y. A. Sheikh. Openpose: Realtime multi-person 2d pose estimation using part affinity fields. IEEE TPAMI, 2019. 
5 +[8] Lan Chen, Qi Mao, Yuchao Gu, and Mike Zheng Shou. Edit transfer: Learning image editing via vision in-context relations. arXiv preprint arXiv:2503.13327, 2025. 7 +[9] Xi Chen, Zhifei Zhang, He Zhang, Yuqian Zhou, Soo Ye Kim, Qing Liu, Yijun Li, Jianming Zhang, Nanxuan Zhao, Yilin Wang, Hui Ding, Zhe Lin, and Hengshuang. Unireal: Universal image generation and editing via learning real-world dynamics. arXiv preprint arXiv:2412.07774, 2024. 4 +[10] Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In ICML, 2018. 3 +[11] Seunghwan Choi, Sunghyun Park, Minsoo Lee, and Jaegul Choo. Viton-hd: High-resolution virtual try-on via misalignment-aware normalization. In CVPR, 2021. 2, 5 +[12] Zheng Chong, Xiao Dong, Haoxiang Li, shiyue Zhang, Wenqing Zhang, Hanqing Zhao, xujie zhang, Dongmei Jiang, and Xiaodan Liang. CatVTON: Concatenation is all you need for virtual try-on with diffusion models. In ICLR, 2025. 2 +[13] Prafulla Dhariwal and Alexander Quinn Nichol. Diffusion models beat GANs on image synthesis. In NeurIPS, 2021. 4 + +[14] Qingxiu Dong, Lei Li, Damai Dai, Ce Zheng, Jingyuan Ma, Rui Li, Heming Xia, Jingjing Xu, Zhiyong Wu, Tianyu Liu, Baobao Chang, Xu Sun, Lei Li, and Zhifang Sui. A survey on in-context learning. arXiv preprint arXiv:2301.00234, 2024. 4 +[15] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, Dustin Podell, Tim Dockhorn, Zion English, Kyle Lacey, Alex Goodwin, Yannik Marek, and Robin Rombach. Scaling rectified flow transformers for high-resolution image synthesis. arXiv preprint arXiv:2403.03206, 2024. 2, 4 +[16] Christopher Fifty, Ehsan Amid, Zhe Zhao, Tianhe Yu, Rohan Anil, and Chelsea Finn. Efficiently identifying task groupings for multi-task learning. In NeurIPS, 2021. 
3 +[17] Peng Gao, Le Zhuo, Dongyang Liu, Ruoyi Du, Xu Luo, Longtian Qiu, Yuhang Zhang, Chen Lin, Rongjie Huang, Shijie Geng, et al. Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers. arXiv preprint arXiv:2405.05945, 2024. 4 +[18] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Mdtv2: Masked diffusion transformer is a strong image synthesizer. arXiv preprint arXiv:2303.14389, 2024. 4 +[19] Golnaz Ghiasi, Barret Zoph, Ekin D. Cubuk, Quoc V. Le, and Tsung-Yi Lin. Multi-task self-training for learning general representations. In ICCV, 2021. 3 +[20] Geonmo Gu, Byungsoo Ko, SeoungHyun Go, Sung-Hyun Lee, Jingeun Lee, and Minchul Shin. Towards light-weight and real-time line segment detection. In AAAI, 2022. 5 +[21] Aaron Hertzmann. Algorithms for rendering in artistic styles. PhD thesis, New York University, Graduate School of Arts and Science, 2001. 4 +[22] Aaron Hertzmann, Charles E. Jacobs, Nuria Oliver, Brian Curless, and David H. Salesin. Image analogies. In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, 2001. 4 +[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 10, 16 +[24] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In NeurIPS, 2020. 4 +[25] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In ICLR, 2022. 7 +[26] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Huanzhang Dou, Yupeng Shi, Yutong Feng, Chen Liang, Yu Liu, and Jingren Zhou. Group diffusion transformers are unsupervised multitask learners. arXiv preprint arxiv:2410.15027, 2024. 
4 +[27] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Huanzhang Dou, Chen Liang, Yutong Feng, Yu Liu, and Jingren Zhou. In-context lora for diffusion transformers. arXiv preprint arxiv:2410.23775, 2024. 2, 3 + +[28] Shijie Huang, Yiren Song, Yuxuan Zhang, Hailong Guo, Xueyin Wang, Mike Zheng Shou, and Jiaming Liu. Photodoodle: Learning artistic image editing from few-shot pairwise data. arXiv preprint arXiv:2502.14397, 2025. 5 +[29] Zehuan Huang, Yuanchen Guo, Haoran Wang, Ran Yi, Lizhuang Ma, Yan-Pei Cao, and Lu Sheng. Mv-adapter: Multi-view consistent image generation made easy. arXiv preprint arXiv:2412.03632, 2024. 5 +[30] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5148-5157, 2021. 10, 16 +[31] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In CVPR, 2018. 3 +[32] Iasonas Kokkinos. Ethernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In CVPR, 2017. 3 +[33] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024. 2, 3, 4, 5, 7, 10, 11, 16 +[34] Bolin Lai, Felix Juefei-Xu, Miao Liu, Xiaoliang Dai, Nikhil Mehta, Chenguang Zhu, Zeyi Huang, James M Rehg, Sang-min Lee, Ning Zhang, et al. Unleashing in-context learning of autoregressive models for few-shot image manipulation. arXiv preprint arXiv:2412.01027, 2024. 4 +[35] Duong H. Le, Tuan Pham, Sangho Lee, Christopher Clark, Aniruddha Kembhavi, Stephan Mandt, Ranjay Krishna, and Jiasen Lu. One diffusion to generate them all, 2024. 2, 3, 4, 10 +[36] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pretrained subject representation for controllable text-to-image generation and editing. In NeurIPS, 2023. 
16 +[37] Kunchang Li, Yali Wang, Junhao Zhang, Peng Gao, Guanglu Song, Yu Liu, Hongsheng Li, and Yu Qiao. Uniformer: Unifying convolution and self-attention for visual recognition. IEEE TPAMI, 2023. 5 +[38] Zhen Li, Mingdeng Cao, Xintao Wang, Zhongang Qi, MingMing Cheng, and Ying Shan. Photomaker: Customizing realistic human photos via stacked id embedding. In CVPR, 2024. 2, 4, 6 +[39] Weifeng Lin, Xinyu Wei, Renrui Zhang, Le Zhuo, Shitian Zhao, Siyuan Huang, Junlin Xie, Yu Qiao, Peng Gao, and Hongsheng Li. Pixwizard: Versatile image-to-image visual assistant with open-language instructions. arXiv preprint arXiv:2409.15278, 2024. 2, 5, 6 +[40] Yaron Lipman, Ricky T. Q. Chen, Heli Ben-Hamu, Maximilian Nickel, and Matthew Le. Flow matching for generative modeling. In ICLR, 2023. 4 +[41] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining. arXiv preprint arXiv:2408.02657, 2024. 4 +[42] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 4 + +[43] Yihao Liu, Xiangyu Chen, Xianzheng Ma, Xintao Wang, Jiantao Zhou, Yu Qiao, and Chao Dong. Unifying image processing as visual prompting question answering. arXiv preprint arXiv:2310.10513, 2023. 3, 4, 6 +[44] Chaojie Mao, Jingfeng Zhang, Yulin Pan, Zeyinzi Jiang, Zhen Han, Yu Liu, and Jingren Zhou. Ace++: Instruction-based image creation and editing via context-aware content filling. arXiv preprint arXiv:2501.02487, 2025. 2, 4 +[45] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. Sdedit: Guided image synthesis and editing with stochastic differential equations. arXiv preprint arXiv:2108.01073, 2021. 7 +[46] Noor Nashid, Mifta Sintaha, and Ali Mesbah. Retrieval-based prompt selection for code-related few-shot learning. 
In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE), pages 2450-2462. IEEE, 2023. 8 +[47] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 11, 16 +[48] William Peebles and Saining Xie. Scalable diffusion models with transformers. In ICCV, 2023. 4, 16 +[49] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 10, 11, 16 +[50] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, ChaoYuan Wu, Ross Girshick, Piotr Dólar, and Christoph Feichtenhofer. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. 5 +[51] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2022. 4 +[52] Ohad Rubin, Jonathan Herzig, and Jonathan Berant. Learning to retrieve prompts for in-context learning. arXiv preprint arXiv:2112.08633, 2021. 8 +[53] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 3 +[54] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023. 
2, 16 +[55] Dianmo Sheng, Dongdong Chen, Zhentao Tan, Qiankun Liu, Qi Chu, Jianmin Bao, Tao Gong, Bin Liu, Shengwei Xu, and Nenghai Yu. Towards more unified in-context visual understanding. In CVPR, 2024. 4 +[56] Kihyuk Sohn, Nataniel Ruiz, Kimin Lee, Daniel Castro Chin, Irina Blok, Huiwen Chang, Jarred Barber, Lu Jiang, Glenn Entis, Yuanzhen Li, et al. Styledrop: Text-to-image + +generation in any style. arXiv preprint arXiv:2306.00983, 2023.16 +[57] Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864, 2021. 7 +[58] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 4 +[59] Yanpeng Sun, Qiang Chen, Jian Wang, Jingdong Wang, and Zechao Li. Exploring effective factors for improving visual in-context learning. arXiv preprint arXiv:2304.04748, 2023. 4 +[60] Yasheng SUN, Yifan Yang, Houwen Peng, Yifei Shen, Yuqing Yang, Han Hu, Lili Qiu, and Hideki Koike. Imagebrush: Learning visual in-context instructions for exemplar-based image manipulation. In NeurIPS, 2023. 3, 4, 6 +[61] Zhenxiong Tan, Songhua Liu, Xingyi Yang, Qiaochu Xue, and Xinchao Wang. *Omnicontrol: Minimal and universal control for diffusion transformer.* arXiv preprint arXiv:2411.15098, 3, 2024. 4, 5, 6, 10, 11 +[62] Paints-Undo Team. Paints-undo github page, 2024. 5 +[63] Alex Jinpeng Wang, Linjie Li, Yiqi Lin, Min Li, Lijuan Wang, and Mike Zheng Shou. Leveraging visual tokens for extended text contexts in multi-modal learning. NeurIPS, 2024. 4 +[64] Haofan Wang, Peng Xing, Renyuan Huang, Hao Ai, Qixun Wang, and Xu Bai. Instantstyle-plus: Style transfer with content-preserving in text-to-image generation. arXiv preprint arXiv:2407.00788, 2024. 
2, 4, 5, 10 +[65] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 5 +[66] Xinlong Wang, Wen Wang, Yue Cao, Chunhua Shen, and Tiejun Huang. Images speak in images: A generalist painter for in-context visual learning. In CVPR, 2023. 3, 4 +[67] Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, and Tiejun Huang. Seggpt: Towards segmenting everything in context. In ICCV, 2023. 3, 4 +[68] Zhendong Wang, Yifan Jiang, Yadong Lu, yelong shen, Pengcheng He, Weizhu Chen, Zhangyang Wang, and Mingyuan Zhou. In-context learning unlocked for diffusion models. In NeurIPS, 2023. 4 +[69] Cong Wei, Zheyang Xiong, Weiming Ren, Xinrun Du, Ge Zhang, and Wenhu Chen. Omniedit: Building image editing generalist models through specialist supervision. arXiv preprint arXiv:2411.07199, 2024. 2, 3, 5, 6 +[70] Shaojin Wu, Mengqi Huang, Wenxu Wu, Yufeng Cheng, Fei Ding, and Qian He. Less-to-more generalization: Unlocking more controllability by in-context generation. arXiv preprint arXiv:2504.02160, 2025. 8 +[71] Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and + +Zheng Liu. Omnigen: Unified image generation. arXiv preprint arXiv:2409.11340, 2024. 2, 3, 4, 10, 11 +[72] Saining Xie and Zhuowen Tu. Holistically-nested edge detection. In CVPR, 2015. 5 +[73] Haofei Xu, Jing Zhang, Jianfei Cai, Hamid Rezatofighi, Fisher Yu, Dacheng Tao, and Andreas Geiger. Unifying flow, stereo and depth estimation. IEEE TPAMI, 2023. 5 +[74] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv:2406.09414, 2024. 
5 +[75] Sidi Yang, Tianhe Wu, Shuwei Shi, Shanshan Lao, Yuan Gong, Mingdeng Cao, Jiahao Wang, and Yujiu Yang. Maniaq: Multi-dimension attention network for no-reference image quality assessment. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1191-1200, 2022. 10, 16 +[76] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 3 +[77] Fanghua Yu, Jinjin Gu, Zheyuan Li, Jinfan Hu, Xiangtao Kong, Xintao Wang, Jingwen He, Yu Qiao, and Chao Dong. Scaling up to excellence: Practicing model scaling for photo-realistic image restoration in the wild. arXiv preprint arXiv:2401.13627, 2024. 3 +[78] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Ben Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. Transactions on Machine Learning Research, 2022. 4 +[79] Hayoung Yun and Hanjoo Cho. Achievement-based training progress balancing for multi-task learning. In ICCV, 2023. 3 +[80] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In ICCV, 2023. 3, 4, 5, 10 +[81] Yuxin Zhang, Nisha Huang, Fan Tang, Haibin Huang, Chongyang Ma, Weiming Dong, and Changsheng Xu. Inversion-based style transfer with diffusion models. In CVPR, 2023. 2, 3, 11 +[82] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. What makes good examples for visual in-context learning? In NeurIPS, 2023. 3, 4 +[83] Canyu Zhao, Mingyu Liu, Huanyi Zheng, Muzhi Zhu, Zhiyue Zhao, Hao Chen, Tong He, and Chunhua Shen. Disception: A generalist diffusion model for visual perceptual tasks. arXiv preprint arXiv:2502.17157, 2025. 
4 +[84] Peng Zheng, Dehong Gao, Deng-Ping Fan, Li Liu, Jorma Laaksonen, Wanli Ouyang, and Nicu Sebe. Bilateral reference for high-resolution dichotomous image segmentation. CAAI Artificial Intelligence Research, 2024. 5 +[85] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In CVPR, 2017. 3 +[86] Yucheng Zhou, Xiang Li, Qianning Wang, and Jianbing Shen. Visual in-context learning for large vision-language models. arXiv preprint arXiv:2402.11574, 2024. 4 + +[87] Muzhi Zhu, Yang Liu, Zekai Luo, Chenchen Jing, Hao Chen, Guangkai Xu, Xinlong Wang, and Chunhua Shen. Unleashing the potential of the diffusion model in few-shot semantic segmentation. In NeurIPS, 2024. 3, 4 +[88] Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Xiangyang Zhu, Fu-Yun Wang, Zhanyu Ma, Xu Luo, Zehan Wang, Kaipeng Zhang, Lirui Zhao, Si Liu, Xiangyu Yue, Wanli Ouyang, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina next: Making lumina-t2x stronger and faster with next-dit. In NeurIPS, 2024. 2, 3, 4 + +# Appendix A. Instruction Format + +In our unified framework, the instruction consists of three parts: (1) layout instruction, which describes the layout of the grid image; (2) task instruction, which specifies the task type; and (3) content instruction, which describes the content of the target image. Fig. 13 illustrates the instructions for concept fusion of style, subject, and layout (Fig. 13 upper) and image editing with reference (Fig. 13 bottom). The content instruction is omitted for some tasks that provide strong visual cues in conditions, like style transfer. + +# Appendix B. Fine-tuning FLUX.1-dev Model + +Apart from FLUX.1-Fill-dev, we also adapt our method to FLUX.1-dev [33], a common text-to-image generative model. 
Unlike the infilling model that shares a consistent objective with universal image generation, FLUX.1-dev requires customized modifications to process clean condition images and noise target images. Specifically, after concatenating images in a grid layout like the infilling model, we always keep the region corresponding to the conditions as clean latent embeddings throughout the sampling process. This strategy requires modifications in image sampling because FLUX.1-Fill-dev takes noise latent embeddings as input. Moreover, for the adaLN-Zero block [48], it is critical to calculate the separate mean and shift parameters for the regions of clean conditions and noise target by feeding $T = 0$ and $T = t$ into the adaLN-Zero, respectively. $t$ indicates the timestep in each sampling step and gradually increases from 0 to 1 along the sampling process. This strategy aligns with the pre-training domain of FLUX.1-dev, where different noise levels correspond to different mean and shift. As shown in Fig. 14, this strategy ensures the visual fidelity. + +# Appendix C. Evaluation Metrics + +# C.1. Conditioning Generation + +We assess the models from controllability, quality, and text consistency to evaluate image generation quality in conditioning generation and image restoration tasks. + +Controllability. For conditional image generation, we measure the difference between the input conditions and those extracted from generated images. Specifically, we calculate the F1 Score for the canny-to-image task and RMSE for the depth-to-image task. Additionally, for deblurring, we measure the RMSE between original and restored images. + +Generation quality. We measure the generation quality using FID [23], SSIM, MAN-IQA [75], and MUSIQ [30]. FID [23] measures the similarity between generated and real image feature distributions. SSIM evalu + +ates perceptual quality by comparing luminance, contrast, and structural patterns between images. 
It calculates local patch statistics and combines them into a composite score ranging from $-1$ to 1, with higher values indicating better structural preservation. MANIQA [75] and MUSIQ [30] leverage neural networks to predict image quality scores. + +Text consistency. Leveraging the powerful multi-modal capability of CLIP [49], we also measure the semantic alignment between generated images and text prompts, which reflects how the model follows instructions. + +# C.2. Subject Driven Generation + +Following DreamBooth [54] and BLIP-Diffusion [36], we measure DINOv2 [47], CLIP-I [49], and CLIP-T scores for the comparison of subject-driven image generation. DINOv2 [47] and CLIP-I scores measure the alignment between the reference subject and generated images through cosine similarity and CLIP score, respectively. CLIP-T measures the alignment between the generated image and the corresponding text prompt. + +# C.3. Style Transfer + +Following StyleDrop [56], we assess the performance of style transfer according to text consistency and style alignment. For text alignment, we measure the cosine similarity between embeddings of generated images and text prompts, where the embeddings are extracted by CLIP [49]. Regarding style consistency, we measure the cosine similarity between embeddings of generated images and style reference. Note that these two metrics should be considered together because the style consistency will reach 1.0 if the model collapses, where the model completely copies style reference as a composite image and ignores text instructions. + +![](images/ecbf871ff2a97d743b601e08351ab7eb90c6ec2fea0b8095ee48c514aff5062d.jpg) + +# Layout instruction: + +12 images are organized into a grid of 3 rows and 4 columns, evenly spaced. 
+ +# Task instruction: + +Each row describes a process that begins with [IMAGE1] white edge lines on black from canny detection, [IMAGE2] Photo with a strong artistic theme, [IMAGE3] a reference image showcasing the dominant object and results in [IMAGE4] High-quality visual with distinct artistic touch. + +# Content instruction: + +0 + +![](images/57e0baf0528b693e26676352db38fc6d3da52d3b54bc71b5459c16e30fbeb04e.jpg) +(a) Concatenated images +Figure 13. Examples of language instructions that contain prompts about the layout of the concatenated image, task intent, and content of the target image. + +# Layout instruction: + +A 3x3 grid containing 9 images, aligned in a clean and structured layout + +# Task instruction: + +Every row provides a step-by-step guide to evolve [IMAGE1] a reference image with the main subject included, [IMAGE2] an image with flawless clarity into [IMAGE3] a high-quality image. + +# Content instruction: + +The bottom-right corner image presents: A glossy gel nail polish bottle. At the edge of a bustling city park, this item rests on vibrant green grass, captured with a subtle bokeh effect as joggers and pets move in the background. + +# (b) Language instructions + +![](images/ec06ee6695d88b0d46b5f25e4cca7be24315691f1a9ddf2b5a2a9ac4308e52f4.jpg) +Condition + +![](images/0e85569193972fbb96aa49172e0269e931543b886ef36bfc2dece96a9861cc93.jpg) +Target + +![](images/8f28dcad99cf5e02fe353236076009c397e7bdd7f66c8b64768a20abe40ff522.jpg) +Condition + +![](images/fec04428066c5f332887357520d323ebdf63d8418f8810016904644ec989f7e1.jpg) +Target + +![](images/730cf23dcb3263363f4822c37e1a3b3bc81798efc1e9fd59a950a41fc35132f8.jpg) +(a) separate mean and shift + +![](images/d354e8d0d32131643b1cced4782295a381b1ae6609006c1e868af1fd71828515.jpg) +Figure 14. Effects of separate mean and shift in fine-tuning FLUX.1-dev. 
+ +![](images/b956840f0c8e329182c2355da90915a90cc86e1132b6cc30aead6bf424475919.jpg) +(b) unified mean and shift + +![](images/8748534db01d157266d73470cc0bf47cd9889217f45f4307fea447a43120e8f8.jpg) \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07960/images/0011099c4f11710feccc82686f0023255f8adccc3a9a19fce321b9d65917a02a.jpg b/data/2025/2504_07xxx/2504.07960/images/0011099c4f11710feccc82686f0023255f8adccc3a9a19fce321b9d65917a02a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e2d9fc6d82a2e9f52d8d533a336c1ed968dd1f1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/0011099c4f11710feccc82686f0023255f8adccc3a9a19fce321b9d65917a02a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00e7b9ea2b9c02d8c00a4b3760a7233fb867f7e6927d9ef6afebd710bd00e28e +size 2501 diff --git a/data/2025/2504_07xxx/2504.07960/images/014dfd4d96816ceef7a915c7b8d206169fb9cffce49a5fdf83d9b968c33943a9.jpg b/data/2025/2504_07xxx/2504.07960/images/014dfd4d96816ceef7a915c7b8d206169fb9cffce49a5fdf83d9b968c33943a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd566250d6c703b076326b97d8cddb06ea093de4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/014dfd4d96816ceef7a915c7b8d206169fb9cffce49a5fdf83d9b968c33943a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:402b2a68fb4c724404288114f5606070b146c6c6c742e552d1db97584a7aa382 +size 3399 diff --git a/data/2025/2504_07xxx/2504.07960/images/092609907fdeb76319a8b135e6864cbfdaf3f44b0f64eee6b2bc9518ecd538c5.jpg b/data/2025/2504_07xxx/2504.07960/images/092609907fdeb76319a8b135e6864cbfdaf3f44b0f64eee6b2bc9518ecd538c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ed2b1805eab75ff066da891f3cdb71c47bfe098b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/092609907fdeb76319a8b135e6864cbfdaf3f44b0f64eee6b2bc9518ecd538c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:240e6b08c7c14fd6b5359636fa6532e361ef7d3721aea9c959b146a6b40699f2 +size 2200 diff --git a/data/2025/2504_07xxx/2504.07960/images/0a0b70f5913e7a1add33c44846032d35427d7d7d84ba92ea0890c986c695058a.jpg b/data/2025/2504_07xxx/2504.07960/images/0a0b70f5913e7a1add33c44846032d35427d7d7d84ba92ea0890c986c695058a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ce5daf2c2e13d692592edcfb2f2de4e63dbf45a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/0a0b70f5913e7a1add33c44846032d35427d7d7d84ba92ea0890c986c695058a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd16caeef783765a4298addff211e6df299354551be6a90ad677d77c349ae4b4 +size 8368 diff --git a/data/2025/2504_07xxx/2504.07960/images/0a8c11abc1d3585d2309684f2424ffa5cfc40ddeb9c7ebcd550291520024d036.jpg b/data/2025/2504_07xxx/2504.07960/images/0a8c11abc1d3585d2309684f2424ffa5cfc40ddeb9c7ebcd550291520024d036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..715f6934c1ede420c5ff7a5a429bb3e30bd599d3 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/0a8c11abc1d3585d2309684f2424ffa5cfc40ddeb9c7ebcd550291520024d036.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d0ca05252701d1ce7ed04549d41e2f8759c7cc03c7d4a945d7877cdb4cfd5fc +size 2536 diff --git a/data/2025/2504_07xxx/2504.07960/images/0b92ba007fe2e0cfea911db6cb5b8efc794bf013ec60d64dbfbae9c61a97329d.jpg b/data/2025/2504_07xxx/2504.07960/images/0b92ba007fe2e0cfea911db6cb5b8efc794bf013ec60d64dbfbae9c61a97329d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00b928a069485e41154fcb1fad5a25446209ef2a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/0b92ba007fe2e0cfea911db6cb5b8efc794bf013ec60d64dbfbae9c61a97329d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe83f68fa01012f9295f623365e67b9544fc81500286553cc95e90a730c77de3 +size 12554 diff --git 
a/data/2025/2504_07xxx/2504.07960/images/0e85569193972fbb96aa49172e0269e931543b886ef36bfc2dece96a9861cc93.jpg b/data/2025/2504_07xxx/2504.07960/images/0e85569193972fbb96aa49172e0269e931543b886ef36bfc2dece96a9861cc93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cd59796cb7a91db6514240a440905f22891127d9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/0e85569193972fbb96aa49172e0269e931543b886ef36bfc2dece96a9861cc93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0de5915d29ebb9399ab754cd9dbd3c0e2ac76093a4e1bbbcd8295c9ad50d5bfb +size 6302 diff --git a/data/2025/2504_07xxx/2504.07960/images/0f2678607e6c892270344d986bc5156a63dca9f44988435e90886ea70b2ba0ed.jpg b/data/2025/2504_07xxx/2504.07960/images/0f2678607e6c892270344d986bc5156a63dca9f44988435e90886ea70b2ba0ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5206a18c29feeaf6af679be38404dc0d490f704a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/0f2678607e6c892270344d986bc5156a63dca9f44988435e90886ea70b2ba0ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5b681e77456cc814d19f0dece1013763bc04153f3b7bc1d1b8370109fc82ce0 +size 2630 diff --git a/data/2025/2504_07xxx/2504.07960/images/1263fd0d3b503ab31990285eaa05a8492e113ec5ee63b7f9025a558be400f16f.jpg b/data/2025/2504_07xxx/2504.07960/images/1263fd0d3b503ab31990285eaa05a8492e113ec5ee63b7f9025a558be400f16f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e374236f8c9d478cc89b40f17751c0ae84f6eca --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1263fd0d3b503ab31990285eaa05a8492e113ec5ee63b7f9025a558be400f16f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03d7c8ca396dc0832e5f916d0298526d91b2001bbf4f65bc3de1974a36b273e7 +size 2233 diff --git a/data/2025/2504_07xxx/2504.07960/images/1358ebd9de822d6bba28037c85b8d86380df5a48d034a0f61ea62114365d94fe.jpg 
b/data/2025/2504_07xxx/2504.07960/images/1358ebd9de822d6bba28037c85b8d86380df5a48d034a0f61ea62114365d94fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f26e37337b23934c2a61abeca892978488ce552 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1358ebd9de822d6bba28037c85b8d86380df5a48d034a0f61ea62114365d94fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49171547e82492e79178a2f639a6974ba970d78d7a5e1664be0b0bee16119ecf +size 39018 diff --git a/data/2025/2504_07xxx/2504.07960/images/14138013e9b9475875a02acdbd4b44acb2fbf1da5f2bf9232c5038dd69054d61.jpg b/data/2025/2504_07xxx/2504.07960/images/14138013e9b9475875a02acdbd4b44acb2fbf1da5f2bf9232c5038dd69054d61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e2c6b1362c3d3055f2bc9e64e033ac535d1302e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/14138013e9b9475875a02acdbd4b44acb2fbf1da5f2bf9232c5038dd69054d61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1816e8e4bc55c1c2a835c7d64b0b44c6c40d97db52e198a4a91cb436474845ec +size 4683 diff --git a/data/2025/2504_07xxx/2504.07960/images/15e8bf13c9a2330b000578b4431ba8c8b856240daefb6265a36cbff561e2c67d.jpg b/data/2025/2504_07xxx/2504.07960/images/15e8bf13c9a2330b000578b4431ba8c8b856240daefb6265a36cbff561e2c67d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db07cc461b4ccee9ddf1b7ef900e9666fce522dd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/15e8bf13c9a2330b000578b4431ba8c8b856240daefb6265a36cbff561e2c67d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54c06555fdb592a9dfc89ae8ddb3580d6d62c2fd37aa9154bfa910a00466e113 +size 32326 diff --git a/data/2025/2504_07xxx/2504.07960/images/1698230c67af2bc59f13117325208987cdf1153085e63af2e77e4bcdc8626b1d.jpg b/data/2025/2504_07xxx/2504.07960/images/1698230c67af2bc59f13117325208987cdf1153085e63af2e77e4bcdc8626b1d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..2fb6b02c15840e1d7343616e95c408177f293d92 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1698230c67af2bc59f13117325208987cdf1153085e63af2e77e4bcdc8626b1d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a8341fa68dbed9ff46277faa4c9310156c57fd72733f67a3c961107b8e916f0 +size 2358 diff --git a/data/2025/2504_07xxx/2504.07960/images/1732fc2ed7efbba343de0423288c803466a9f0e9b719b20d684c15538ef8510e.jpg b/data/2025/2504_07xxx/2504.07960/images/1732fc2ed7efbba343de0423288c803466a9f0e9b719b20d684c15538ef8510e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..640754f84757c33d6dfb4c58564ff0d4038153d6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1732fc2ed7efbba343de0423288c803466a9f0e9b719b20d684c15538ef8510e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78a809fa6403154b5aac6e33a3373d72a473253d2c68199967e1d29c65d947c4 +size 92882 diff --git a/data/2025/2504_07xxx/2504.07960/images/1a72a8d71ec0685fd94de7f2a505d77e8c5208a02955aace9b945bd03d575d65.jpg b/data/2025/2504_07xxx/2504.07960/images/1a72a8d71ec0685fd94de7f2a505d77e8c5208a02955aace9b945bd03d575d65.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87a1764effa50bab308ed63940e5b414b8832cd7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1a72a8d71ec0685fd94de7f2a505d77e8c5208a02955aace9b945bd03d575d65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0e8868b0b61caeae4bb12ed0e66f188781e63240fb2c1a88b46cc5985882e88 +size 10175 diff --git a/data/2025/2504_07xxx/2504.07960/images/1adeb98c09571df20505a5a4f0305250bc8edd084d989b33f63c66a87587c7b6.jpg b/data/2025/2504_07xxx/2504.07960/images/1adeb98c09571df20505a5a4f0305250bc8edd084d989b33f63c66a87587c7b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb2ecd2873e73691ad19c03f9c3904f53d0a6776 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07960/images/1adeb98c09571df20505a5a4f0305250bc8edd084d989b33f63c66a87587c7b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:870fcd27fc8d27e7222d4ba1aa71fd6e654e66409a4a23d92d94aa1f787b1048 +size 3127 diff --git a/data/2025/2504_07xxx/2504.07960/images/1ced14dcdb0f7655cb23c43ba76afe931b2f069d0003e9406fbb4efecf6621b0.jpg b/data/2025/2504_07xxx/2504.07960/images/1ced14dcdb0f7655cb23c43ba76afe931b2f069d0003e9406fbb4efecf6621b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8838b54319f37181d7e8952763ad8d6e8bcdf9b0 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1ced14dcdb0f7655cb23c43ba76afe931b2f069d0003e9406fbb4efecf6621b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0df0f14ea458f32f1582218344253ef75eb67ece1b3e71794f794593eb3a7e3c +size 2523 diff --git a/data/2025/2504_07xxx/2504.07960/images/1dc3854fd5ac3d6dcffbeb4a1d39085a938d5eb202d29657e49a2f6b7d256cda.jpg b/data/2025/2504_07xxx/2504.07960/images/1dc3854fd5ac3d6dcffbeb4a1d39085a938d5eb202d29657e49a2f6b7d256cda.jpg new file mode 100644 index 0000000000000000000000000000000000000000..846c47169b4485d5c8452f2e239a8a1a80afb791 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1dc3854fd5ac3d6dcffbeb4a1d39085a938d5eb202d29657e49a2f6b7d256cda.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1437d3991e1127e6119f0cbb04020fa75212581b2786f030ba6c4181f5e4d49 +size 6109 diff --git a/data/2025/2504_07xxx/2504.07960/images/1df33f9215c95c3263f0399bb5306ada5f1da2b9580d1f7258ea97c667a0082c.jpg b/data/2025/2504_07xxx/2504.07960/images/1df33f9215c95c3263f0399bb5306ada5f1da2b9580d1f7258ea97c667a0082c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4737e34743bc3c9520b41a205d816760ee9a6965 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1df33f9215c95c3263f0399bb5306ada5f1da2b9580d1f7258ea97c667a0082c.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a9f9975aeb5aee1d7a153d83f27b3c875c8e75c136e21094ea76539c3c0dc834 +size 3134 diff --git a/data/2025/2504_07xxx/2504.07960/images/1ee7c7cb4e3fa3c1f80c8307b2f125ca56115352f10a661f2a448b98d7733511.jpg b/data/2025/2504_07xxx/2504.07960/images/1ee7c7cb4e3fa3c1f80c8307b2f125ca56115352f10a661f2a448b98d7733511.jpg new file mode 100644 index 0000000000000000000000000000000000000000..305ff16321d7df475a0c6986b64faffffbed3883 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1ee7c7cb4e3fa3c1f80c8307b2f125ca56115352f10a661f2a448b98d7733511.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:805adbebaeff76f452bc35be62d627d387a07199747bb229b5fa6a97a929ce70 +size 29074 diff --git a/data/2025/2504_07xxx/2504.07960/images/1f186305b4bd17875edafaef1ce3e6238205a54a85f06cae50a1c0fc8d34d92f.jpg b/data/2025/2504_07xxx/2504.07960/images/1f186305b4bd17875edafaef1ce3e6238205a54a85f06cae50a1c0fc8d34d92f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b2325c40ce00f5e27325608af304935e70c9c2b6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/1f186305b4bd17875edafaef1ce3e6238205a54a85f06cae50a1c0fc8d34d92f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1267c0815b6f1f41872eb9616a484cc03c20ace2b9310990d8d23e402035a412 +size 3047 diff --git a/data/2025/2504_07xxx/2504.07960/images/2202c6af3c094ac1789278049ef49938d293faf990575f94e378d7aa01bd8828.jpg b/data/2025/2504_07xxx/2504.07960/images/2202c6af3c094ac1789278049ef49938d293faf990575f94e378d7aa01bd8828.jpg new file mode 100644 index 0000000000000000000000000000000000000000..518c2ff44ff3ccde60ce1e9e46dbce2a64e29fb1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2202c6af3c094ac1789278049ef49938d293faf990575f94e378d7aa01bd8828.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34020437d710c3b952539082272c968637a44898d9a9f2168146659b566edcf7 +size 2314 diff --git 
a/data/2025/2504_07xxx/2504.07960/images/2227fd4b48343bbc2e5a42bcd226b45f9dc129a4c514bc1ae1388c098343dfc2.jpg b/data/2025/2504_07xxx/2504.07960/images/2227fd4b48343bbc2e5a42bcd226b45f9dc129a4c514bc1ae1388c098343dfc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..824393067ca92b32d108071ee06a92649b9a355c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2227fd4b48343bbc2e5a42bcd226b45f9dc129a4c514bc1ae1388c098343dfc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79c42b9973c51a626c9c9dcdcfcc632f0d53830760b8a7f0d95ee3458f7daa72 +size 5139 diff --git a/data/2025/2504_07xxx/2504.07960/images/225435a613094edea23cdf30cd5aef8ff20c0846d6767e1c720739f28dcfdda9.jpg b/data/2025/2504_07xxx/2504.07960/images/225435a613094edea23cdf30cd5aef8ff20c0846d6767e1c720739f28dcfdda9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b971620ccd94f00306461222d4a8a5c4428a698 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/225435a613094edea23cdf30cd5aef8ff20c0846d6767e1c720739f28dcfdda9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ca0bc97dd6df1acb9206e7a80ccfce87175827f44563855c67358af077bb4d1 +size 3274 diff --git a/data/2025/2504_07xxx/2504.07960/images/22dae11fb3a08b4f3574437e9a2aa7294c551e476b059191448618fdacb60f8e.jpg b/data/2025/2504_07xxx/2504.07960/images/22dae11fb3a08b4f3574437e9a2aa7294c551e476b059191448618fdacb60f8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1cf0283a5a72b551e72154e06031102aa78be71b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/22dae11fb3a08b4f3574437e9a2aa7294c551e476b059191448618fdacb60f8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:002f60625cb752e4bc3e034136ba80713ad175a3ba77271edd820aedc9756d72 +size 8428 diff --git a/data/2025/2504_07xxx/2504.07960/images/249c5eba9181e8fcc9090710c206e223e70e8c83341bea0710dcd66874769944.jpg 
b/data/2025/2504_07xxx/2504.07960/images/249c5eba9181e8fcc9090710c206e223e70e8c83341bea0710dcd66874769944.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0659e52c5ec38ecf61dcc6c1bd28ff10262ad899 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/249c5eba9181e8fcc9090710c206e223e70e8c83341bea0710dcd66874769944.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a9a708219f058580b0f74998621bc8f840fe30192959c5d9cf2f669355e279b +size 3676 diff --git a/data/2025/2504_07xxx/2504.07960/images/24b1c78dac9ec403cbf8363cdcd5e632ebc391d46e27bcb75148fea5ba6868d6.jpg b/data/2025/2504_07xxx/2504.07960/images/24b1c78dac9ec403cbf8363cdcd5e632ebc391d46e27bcb75148fea5ba6868d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6954ab2f327fd14a546113af26469286c1272a6b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/24b1c78dac9ec403cbf8363cdcd5e632ebc391d46e27bcb75148fea5ba6868d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5038e778927e1b44b1d2e25b85e8f92db96f550192f31b349516c300a2727f5c +size 9349 diff --git a/data/2025/2504_07xxx/2504.07960/images/2592260bdbec76c8a122218f8f01c344087274b81819d1f8c0e434b38c0b1774.jpg b/data/2025/2504_07xxx/2504.07960/images/2592260bdbec76c8a122218f8f01c344087274b81819d1f8c0e434b38c0b1774.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c9706c1dae29cccc54bfb23264fe9c3c88040da --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2592260bdbec76c8a122218f8f01c344087274b81819d1f8c0e434b38c0b1774.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2909ffb5e4cf2a083ec94211f33550937563e17368f5a99148f65ec117a131cd +size 3442 diff --git a/data/2025/2504_07xxx/2504.07960/images/28630a84114fa8d401e0e3eaea7021dcfa6d6f65411c23b243684d03f78786c9.jpg b/data/2025/2504_07xxx/2504.07960/images/28630a84114fa8d401e0e3eaea7021dcfa6d6f65411c23b243684d03f78786c9.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b357e425bc6bae18e76fb34dd7f53c7b2a8ed4bb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/28630a84114fa8d401e0e3eaea7021dcfa6d6f65411c23b243684d03f78786c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8faac943edfaf1c0cae31f12ec8a3217c864b1d64f0ace7989994d0a86c4df4 +size 6808 diff --git a/data/2025/2504_07xxx/2504.07960/images/2973fc1c963afae2723902ff6deb0c428c16ec2f262084265fd41eb908c04ca6.jpg b/data/2025/2504_07xxx/2504.07960/images/2973fc1c963afae2723902ff6deb0c428c16ec2f262084265fd41eb908c04ca6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c95d75fefcc7dfe0d8b5fea7284248bd150af48e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2973fc1c963afae2723902ff6deb0c428c16ec2f262084265fd41eb908c04ca6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bcd0f10ff1cf766ac34275831bc7ecd9b407cb76377a679518b0621b640b6bc +size 39398 diff --git a/data/2025/2504_07xxx/2504.07960/images/2990a4bc71a56d8004954f8fd263cee7d26ac3fa3581635ac306f3f80eda9c2d.jpg b/data/2025/2504_07xxx/2504.07960/images/2990a4bc71a56d8004954f8fd263cee7d26ac3fa3581635ac306f3f80eda9c2d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20b350ccdd80fa9ada9f6fd7b8b6c28e56f2be05 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2990a4bc71a56d8004954f8fd263cee7d26ac3fa3581635ac306f3f80eda9c2d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b2d13628c050d38d9e5bf61bd8c9c8bb7f08a372cf8ee01ae6109d636e8a9a5 +size 4339 diff --git a/data/2025/2504_07xxx/2504.07960/images/29e6e033eba6f9c828268ab3603ad2247905ec18441924873e78e01090deabba.jpg b/data/2025/2504_07xxx/2504.07960/images/29e6e033eba6f9c828268ab3603ad2247905ec18441924873e78e01090deabba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..249659ae3f5c1be97cebb61d3c797012fe6f1346 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07960/images/29e6e033eba6f9c828268ab3603ad2247905ec18441924873e78e01090deabba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f982926e5f0ca727076be8bcdba2baa5f128c37ddc19505e51c050f6f7a632b +size 3383 diff --git a/data/2025/2504_07xxx/2504.07960/images/2aae421c0d97f7367323bd96a3b60c8ad03dff85be0351db327a74916b1c7eb7.jpg b/data/2025/2504_07xxx/2504.07960/images/2aae421c0d97f7367323bd96a3b60c8ad03dff85be0351db327a74916b1c7eb7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef79ba8a489c750cbc63f28413ff835e44b4f563 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2aae421c0d97f7367323bd96a3b60c8ad03dff85be0351db327a74916b1c7eb7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80606b2de19d5d14cc3fa5d1558cb1b78f93617843a0fc10207e387cb1e0a497 +size 3324 diff --git a/data/2025/2504_07xxx/2504.07960/images/2b4a07349a76312f2d32216d6c874a02a3535c0ad37c2908e984eb13cb6b7287.jpg b/data/2025/2504_07xxx/2504.07960/images/2b4a07349a76312f2d32216d6c874a02a3535c0ad37c2908e984eb13cb6b7287.jpg new file mode 100644 index 0000000000000000000000000000000000000000..10db52e7f23da7e05ef1fd911a97f018e72234ff --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2b4a07349a76312f2d32216d6c874a02a3535c0ad37c2908e984eb13cb6b7287.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c544defaef54c68d63f3227befff1e2f40e68191f52e10c163cfacfc3eda718f +size 6115 diff --git a/data/2025/2504_07xxx/2504.07960/images/2d0bebf1658a821fcc6e82cac71ba626d69b22bdfd41b0163b1654d23b35e58b.jpg b/data/2025/2504_07xxx/2504.07960/images/2d0bebf1658a821fcc6e82cac71ba626d69b22bdfd41b0163b1654d23b35e58b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f6e80a327b8f5595122a349cded605e701523a1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2d0bebf1658a821fcc6e82cac71ba626d69b22bdfd41b0163b1654d23b35e58b.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d4818024ffecba95cfcc8a1de5b14f7c766742300d375e6ab451d53c2f0afe50 +size 3451 diff --git a/data/2025/2504_07xxx/2504.07960/images/2db8d9e6ba2c8127992b3a5110e95463524927abf523df959adf98d905d9ac09.jpg b/data/2025/2504_07xxx/2504.07960/images/2db8d9e6ba2c8127992b3a5110e95463524927abf523df959adf98d905d9ac09.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46dfe8b02a026ad763a3bf95c3be1d621b60ea1b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2db8d9e6ba2c8127992b3a5110e95463524927abf523df959adf98d905d9ac09.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36a1a16d9ea106622a7c21ad017e8cfeb94a607c3831b11c5d7f0357f3712de3 +size 938 diff --git a/data/2025/2504_07xxx/2504.07960/images/2e9ee140d2a4597e7fa85f3386ba1a0e2949ce6941fb40534e77123b7ea6392e.jpg b/data/2025/2504_07xxx/2504.07960/images/2e9ee140d2a4597e7fa85f3386ba1a0e2949ce6941fb40534e77123b7ea6392e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f523d627fca883af641d86b7c57e05c42c71a1c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/2e9ee140d2a4597e7fa85f3386ba1a0e2949ce6941fb40534e77123b7ea6392e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25fc100683e63391f2641c62c424fdcd5a438eaebc99f24a4c472a743e90c71b +size 3822 diff --git a/data/2025/2504_07xxx/2504.07960/images/3490ba6341fe53dae081576bfafd9498c242ee534c5c93aff12f356ab80d5505.jpg b/data/2025/2504_07xxx/2504.07960/images/3490ba6341fe53dae081576bfafd9498c242ee534c5c93aff12f356ab80d5505.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e70e978d9df3e6efb4b22dde66286b7c94237291 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/3490ba6341fe53dae081576bfafd9498c242ee534c5c93aff12f356ab80d5505.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae8b58f7d67739d94d8789a8d9b6fb71c41fd185c373ca310caaa9cc087c9c68 +size 154719 diff --git 
a/data/2025/2504_07xxx/2504.07960/images/35101d5b91c55b650a4ca2e9d86bb2cb516b3928546e9cc30ced8b9e1acfd56c.jpg b/data/2025/2504_07xxx/2504.07960/images/35101d5b91c55b650a4ca2e9d86bb2cb516b3928546e9cc30ced8b9e1acfd56c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a532e406adc4323366d46b213e928e8d2181ca8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/35101d5b91c55b650a4ca2e9d86bb2cb516b3928546e9cc30ced8b9e1acfd56c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc221dde91aaf94fe2ed716e0e457752759d2a795dcc747a874caf8bc23fd1a0 +size 3577 diff --git a/data/2025/2504_07xxx/2504.07960/images/35aef2ef545a9a8891b9ac59dd5f26762d506a0b5d07236fa202dbfa634040af.jpg b/data/2025/2504_07xxx/2504.07960/images/35aef2ef545a9a8891b9ac59dd5f26762d506a0b5d07236fa202dbfa634040af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5dc9c98f97bfb615d63b57aae1b3d816f4ae665c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/35aef2ef545a9a8891b9ac59dd5f26762d506a0b5d07236fa202dbfa634040af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:030eb4c85df9f5fc90e9928a6bac2546493031087e7374d9cfc5e76165e4edc6 +size 6628 diff --git a/data/2025/2504_07xxx/2504.07960/images/3a00376ddd05ce1a246de4058ad55d3b6f886ddb698692c6e84a05e3651aa199.jpg b/data/2025/2504_07xxx/2504.07960/images/3a00376ddd05ce1a246de4058ad55d3b6f886ddb698692c6e84a05e3651aa199.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c70282bf06fc68fa959a340435c7930861b2679 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/3a00376ddd05ce1a246de4058ad55d3b6f886ddb698692c6e84a05e3651aa199.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a491ec96c6da736c5606ecf20c2c3c1fcd0be88ef18acd7028f191d4991782f +size 4908 diff --git a/data/2025/2504_07xxx/2504.07960/images/401a1f0cced2afe8598f4ec09f75d4b47f9f32f86402e3842fb51da4a1e0d20f.jpg 
b/data/2025/2504_07xxx/2504.07960/images/401a1f0cced2afe8598f4ec09f75d4b47f9f32f86402e3842fb51da4a1e0d20f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d99125bf750157ee833768a9365c65f323a5305e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/401a1f0cced2afe8598f4ec09f75d4b47f9f32f86402e3842fb51da4a1e0d20f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b1a2b2ecb0929546c9a32d6729537cd59a8807c33201ef2f83c82450cbe9789 +size 11261 diff --git a/data/2025/2504_07xxx/2504.07960/images/4227aefc2e2bdd775eada89a92f8d6dbb11045e2d36aa517d80ff6fac123a2aa.jpg b/data/2025/2504_07xxx/2504.07960/images/4227aefc2e2bdd775eada89a92f8d6dbb11045e2d36aa517d80ff6fac123a2aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..839008d04e7495e06e498be53460ca9c1169a0fc --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/4227aefc2e2bdd775eada89a92f8d6dbb11045e2d36aa517d80ff6fac123a2aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5aaf2730ee042f9da59a3a42eb7654127751993f7d486f84e879019782f32f5 +size 5557 diff --git a/data/2025/2504_07xxx/2504.07960/images/42c0b7385159af3b1c2f9afba48f8e34b14069b6109c13e3b2601e3948a070c1.jpg b/data/2025/2504_07xxx/2504.07960/images/42c0b7385159af3b1c2f9afba48f8e34b14069b6109c13e3b2601e3948a070c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9240d7b99867207a55bcc79577c2404dc9231cf0 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/42c0b7385159af3b1c2f9afba48f8e34b14069b6109c13e3b2601e3948a070c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70655fca8f721e583e502fc1c998332954d499d22405a663cdbfd021142ef023 +size 6218 diff --git a/data/2025/2504_07xxx/2504.07960/images/4393be7c6f29bc43f54a261a63eaa8c2189646433cf43bedd9b0919cfd199fad.jpg b/data/2025/2504_07xxx/2504.07960/images/4393be7c6f29bc43f54a261a63eaa8c2189646433cf43bedd9b0919cfd199fad.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..f1eef17d1ff948bb15012c36345bb80dde5409ad --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/4393be7c6f29bc43f54a261a63eaa8c2189646433cf43bedd9b0919cfd199fad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a366ccd339cc364ce11489e4bd1a8ff0dc47536ac16ef402e51c11c6d137fa19 +size 3323 diff --git a/data/2025/2504_07xxx/2504.07960/images/4524b24592705bd243982985dfcf7b75d8aa81c12f80b430ba50f7f3f1856fe8.jpg b/data/2025/2504_07xxx/2504.07960/images/4524b24592705bd243982985dfcf7b75d8aa81c12f80b430ba50f7f3f1856fe8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..410e5550ba3063ce442bfeaa0d352da8e1951981 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/4524b24592705bd243982985dfcf7b75d8aa81c12f80b430ba50f7f3f1856fe8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:454e71cb844d586cb7661fd139f0223440fac5aba1df186e3fb237aac67268fc +size 42074 diff --git a/data/2025/2504_07xxx/2504.07960/images/4524ba90197d6faf3924f2fa848409b267d2ba9b74ae737bf113379c1d8f9fb6.jpg b/data/2025/2504_07xxx/2504.07960/images/4524ba90197d6faf3924f2fa848409b267d2ba9b74ae737bf113379c1d8f9fb6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68b97210c76564ebb09d21dc8926f4c791064147 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/4524ba90197d6faf3924f2fa848409b267d2ba9b74ae737bf113379c1d8f9fb6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58117e81d9ff70a5515a23d1ed00516f3961c7b654551f734cf3ef9034004f13 +size 3237 diff --git a/data/2025/2504_07xxx/2504.07960/images/47b133f962d1943ebccc7d7b95526dd22ba979ddb99f83ec1f369a7fafebc9be.jpg b/data/2025/2504_07xxx/2504.07960/images/47b133f962d1943ebccc7d7b95526dd22ba979ddb99f83ec1f369a7fafebc9be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e53de87d3549cb8dea9f4802f01c08b950837f82 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07960/images/47b133f962d1943ebccc7d7b95526dd22ba979ddb99f83ec1f369a7fafebc9be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00374da581d64c7b96a1ac779c3f7d23595c0fa086d9e5ccb1376d173dd2831f +size 5858 diff --git a/data/2025/2504_07xxx/2504.07960/images/498e06995dd22674b4ab75b8ab7f3a35d95bd305b7e863f4d09253954c32a39e.jpg b/data/2025/2504_07xxx/2504.07960/images/498e06995dd22674b4ab75b8ab7f3a35d95bd305b7e863f4d09253954c32a39e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..206c811b3856cb84838f267da3d4fc0603e44d80 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/498e06995dd22674b4ab75b8ab7f3a35d95bd305b7e863f4d09253954c32a39e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1c3d47c5018e3ac44465670d251247a26cc18596a172edc1611bfc6a03f07bd +size 8698 diff --git a/data/2025/2504_07xxx/2504.07960/images/49e8c714009dcb15c30f842353e6790b503d26f066686ad9343c23d792f474af.jpg b/data/2025/2504_07xxx/2504.07960/images/49e8c714009dcb15c30f842353e6790b503d26f066686ad9343c23d792f474af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16d0909f675928314ce22cce3e5dc5c9aa07a47d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/49e8c714009dcb15c30f842353e6790b503d26f066686ad9343c23d792f474af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e1ed0b9ef046907f19e1360d64b17eac297ee954559803f95bde72450a56e30 +size 2548 diff --git a/data/2025/2504_07xxx/2504.07960/images/4c8e89482096035b9b67bf8d9f75fde004d8898eef458d3627c5b17cc693e0a4.jpg b/data/2025/2504_07xxx/2504.07960/images/4c8e89482096035b9b67bf8d9f75fde004d8898eef458d3627c5b17cc693e0a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cc3b9eb45ceca970a5d6e6e2dcdb4e12f2d433f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/4c8e89482096035b9b67bf8d9f75fde004d8898eef458d3627c5b17cc693e0a4.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d1fc0644d031c70f3d336b8940e38e57c60ae90992096ecee8a466da90087dde +size 2627 diff --git a/data/2025/2504_07xxx/2504.07960/images/4ea29b7e8c65442528e8fbcb8620b04d0255186bc45e1f44ad9998ad16841a57.jpg b/data/2025/2504_07xxx/2504.07960/images/4ea29b7e8c65442528e8fbcb8620b04d0255186bc45e1f44ad9998ad16841a57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de51665e11b5716b0c2a98db3913da59f0172199 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/4ea29b7e8c65442528e8fbcb8620b04d0255186bc45e1f44ad9998ad16841a57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:486bd296ebfeb5c7175c07d9cfd0d841ac46771497250ce9357ef9a110fd5f24 +size 37237 diff --git a/data/2025/2504_07xxx/2504.07960/images/4ee44494de36fcbc77ba11e187af703a9e635c2d744149ecf1d844c025abc15e.jpg b/data/2025/2504_07xxx/2504.07960/images/4ee44494de36fcbc77ba11e187af703a9e635c2d744149ecf1d844c025abc15e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a42b7aa5df5065ec2b6804ea0d3796434e875775 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/4ee44494de36fcbc77ba11e187af703a9e635c2d744149ecf1d844c025abc15e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:527ff166e48565f162b5c1f8fffda213918fa245f7dee8c09eac00a8385ff151 +size 3627 diff --git a/data/2025/2504_07xxx/2504.07960/images/508cbcf5bd841b99b378aea70c2c4cb285132b5cb2e168a31a3cdf249aa1e100.jpg b/data/2025/2504_07xxx/2504.07960/images/508cbcf5bd841b99b378aea70c2c4cb285132b5cb2e168a31a3cdf249aa1e100.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73c332a2bbc6b0708e31519206aded67589bd42f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/508cbcf5bd841b99b378aea70c2c4cb285132b5cb2e168a31a3cdf249aa1e100.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71a0d2ab9b247835af393984b9be2919175e6e2f50705203d6ea7320108c51aa +size 8182 diff --git 
a/data/2025/2504_07xxx/2504.07960/images/57e0baf0528b693e26676352db38fc6d3da52d3b54bc71b5459c16e30fbeb04e.jpg b/data/2025/2504_07xxx/2504.07960/images/57e0baf0528b693e26676352db38fc6d3da52d3b54bc71b5459c16e30fbeb04e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03fd75804855a82763ba974d69685c2166e2f711 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/57e0baf0528b693e26676352db38fc6d3da52d3b54bc71b5459c16e30fbeb04e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d086b6d74bdffd3fe1aaf35648c3b93c000808520559580fd21e839b24a0948c +size 97024 diff --git a/data/2025/2504_07xxx/2504.07960/images/590911237aefab4740b5c175aafe250be5092b518e279faa3523a69cb26b5770.jpg b/data/2025/2504_07xxx/2504.07960/images/590911237aefab4740b5c175aafe250be5092b518e279faa3523a69cb26b5770.jpg new file mode 100644 index 0000000000000000000000000000000000000000..88aa7999fc30e7f7744e15c48b99416646a0465d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/590911237aefab4740b5c175aafe250be5092b518e279faa3523a69cb26b5770.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d451a0c0a8da46bb5da7bba3d82860caedd32120347f29c1e28f459d88b1db9b +size 3229 diff --git a/data/2025/2504_07xxx/2504.07960/images/5eb58d3f654a129ad2c34118c2949abb78a7b3313af457599e18176510edc4f3.jpg b/data/2025/2504_07xxx/2504.07960/images/5eb58d3f654a129ad2c34118c2949abb78a7b3313af457599e18176510edc4f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..407b2acce4f19c84442b2da19a58dd472a11e83c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/5eb58d3f654a129ad2c34118c2949abb78a7b3313af457599e18176510edc4f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d3b3b118ca25db38262e3345878b0dc31c28930878ca49ff9772ad9524260f9 +size 1314 diff --git a/data/2025/2504_07xxx/2504.07960/images/5f64dd49c40006e0644a0d6cb82c0a6b7c54d01053b61838f7c4a4e3da0c8672.jpg 
b/data/2025/2504_07xxx/2504.07960/images/5f64dd49c40006e0644a0d6cb82c0a6b7c54d01053b61838f7c4a4e3da0c8672.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3880b21363ed7277f034f234e2af9173faeb4f8b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/5f64dd49c40006e0644a0d6cb82c0a6b7c54d01053b61838f7c4a4e3da0c8672.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4920053933be665d3e7eccfef15362bfdea03ea553e77e2314ff8bb7e4e26a61 +size 2477 diff --git a/data/2025/2504_07xxx/2504.07960/images/62a0321b2defdca71a8991beaa7c0d9246db575e1936a7f07e2aa3ff4255ef5d.jpg b/data/2025/2504_07xxx/2504.07960/images/62a0321b2defdca71a8991beaa7c0d9246db575e1936a7f07e2aa3ff4255ef5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93bc4326ed859a98d4098e4e68436e49fcc02d62 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/62a0321b2defdca71a8991beaa7c0d9246db575e1936a7f07e2aa3ff4255ef5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ed9a4cd6e4d21b559b32e026f0fc9873aa4ba08f89faf8a831398e76c29875b +size 2647 diff --git a/data/2025/2504_07xxx/2504.07960/images/62c557f0ce9f35dcb3cc7754d95c24e511eefd8b42b59b819beda1a5208aa49f.jpg b/data/2025/2504_07xxx/2504.07960/images/62c557f0ce9f35dcb3cc7754d95c24e511eefd8b42b59b819beda1a5208aa49f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b634a04b2b13cacd21f6ffbec45a068f2f79319 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/62c557f0ce9f35dcb3cc7754d95c24e511eefd8b42b59b819beda1a5208aa49f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2517863d64696bddb6ae1109172490c25d5dbeec18b21110bff1fbf01f7bf4ae +size 3535 diff --git a/data/2025/2504_07xxx/2504.07960/images/66de4842ae368391271cd943215047905ba91ce955a55fbb50f891d087e2be07.jpg b/data/2025/2504_07xxx/2504.07960/images/66de4842ae368391271cd943215047905ba91ce955a55fbb50f891d087e2be07.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..27ecca298aff488449190f60cdbda4b1c6fc61d7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/66de4842ae368391271cd943215047905ba91ce955a55fbb50f891d087e2be07.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b68b70d5379eb0a66d004a62df808fd6920f3fb17b1d975d2f75435b8636b054 +size 38177 diff --git a/data/2025/2504_07xxx/2504.07960/images/66ed0ab3c6fbf09c3be50cfee5d84e76b0392f0983e9eb68ce2439a48844dc56.jpg b/data/2025/2504_07xxx/2504.07960/images/66ed0ab3c6fbf09c3be50cfee5d84e76b0392f0983e9eb68ce2439a48844dc56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8182a74f6c4473903061c4303b4bb90f1b0a3a62 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/66ed0ab3c6fbf09c3be50cfee5d84e76b0392f0983e9eb68ce2439a48844dc56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c27022d4839a51a16e1b0bd9959acac668a6da33958a87ded9af3dbd7fa243da +size 10383 diff --git a/data/2025/2504_07xxx/2504.07960/images/6bbda712bd7907bc495deaf542b4fbea3eb5d4414397d6b8f495eb112c6b6403.jpg b/data/2025/2504_07xxx/2504.07960/images/6bbda712bd7907bc495deaf542b4fbea3eb5d4414397d6b8f495eb112c6b6403.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40dd44fb89d5dc7748efa0ac47a96c6b21dd781a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/6bbda712bd7907bc495deaf542b4fbea3eb5d4414397d6b8f495eb112c6b6403.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5741b3611b374376318af6dcef107a2f1807ed305e1798b386e8770c68514b77 +size 3410 diff --git a/data/2025/2504_07xxx/2504.07960/images/6e7c4b50a411b64ea2fa9b3901aa00c73ad2e08eb154636134429d2e7293b86d.jpg b/data/2025/2504_07xxx/2504.07960/images/6e7c4b50a411b64ea2fa9b3901aa00c73ad2e08eb154636134429d2e7293b86d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e434108dd39cad436209b97fba1e772adb30e05b --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07960/images/6e7c4b50a411b64ea2fa9b3901aa00c73ad2e08eb154636134429d2e7293b86d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b66022b4744f62b158b73cf2fe40f51990a744b9ab8d8c4ebda2a46feb780bb +size 2558 diff --git a/data/2025/2504_07xxx/2504.07960/images/6ef7988092c3f099f7c744d99b5798d8e128ea6928adf080cd75abb614fc6081.jpg b/data/2025/2504_07xxx/2504.07960/images/6ef7988092c3f099f7c744d99b5798d8e128ea6928adf080cd75abb614fc6081.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dbe79de953d73f4f38043c1e71db2c07b6246620 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/6ef7988092c3f099f7c744d99b5798d8e128ea6928adf080cd75abb614fc6081.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8e19b0f7a373d473e64f403511dd6158ff4c82010d92509deb4ecf8f10609e6 +size 2783 diff --git a/data/2025/2504_07xxx/2504.07960/images/730cf23dcb3263363f4822c37e1a3b3bc81798efc1e9fd59a950a41fc35132f8.jpg b/data/2025/2504_07xxx/2504.07960/images/730cf23dcb3263363f4822c37e1a3b3bc81798efc1e9fd59a950a41fc35132f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7368903b78e4f53b2d283701f1f63725f0b236d4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/730cf23dcb3263363f4822c37e1a3b3bc81798efc1e9fd59a950a41fc35132f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac0f2ee94d518acb653ccfda369670fa029392ae73c25c97bd06977b37610ebf +size 4706 diff --git a/data/2025/2504_07xxx/2504.07960/images/74cf25e75f12bfa369263343b948539488e43676d1cdc73d00248e8230e91f3b.jpg b/data/2025/2504_07xxx/2504.07960/images/74cf25e75f12bfa369263343b948539488e43676d1cdc73d00248e8230e91f3b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d999bba0653d675587bd285f5715c2086bd4b7d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/74cf25e75f12bfa369263343b948539488e43676d1cdc73d00248e8230e91f3b.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:748a66c2f0aa4be8972536b8fe167ec631b70f607189b34c0a9db02e73cb10f8 +size 69014 diff --git a/data/2025/2504_07xxx/2504.07960/images/75d89f5d8503df369fc39da3705554613610fada9cfa6423518ea4dc32655477.jpg b/data/2025/2504_07xxx/2504.07960/images/75d89f5d8503df369fc39da3705554613610fada9cfa6423518ea4dc32655477.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80ad3c28d6a926d3d2b84c1d174af050a975f97a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/75d89f5d8503df369fc39da3705554613610fada9cfa6423518ea4dc32655477.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b420c9e2460555a9cd8a333552ff85d39d8c533d5a9959755e3453d4752f0542 +size 874 diff --git a/data/2025/2504_07xxx/2504.07960/images/7725e502055d67d3754bb68f8865a468733e96410a1f407141a16aff82504871.jpg b/data/2025/2504_07xxx/2504.07960/images/7725e502055d67d3754bb68f8865a468733e96410a1f407141a16aff82504871.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8e4aa2f1df1f62590387f89f504c6ad78ff819b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/7725e502055d67d3754bb68f8865a468733e96410a1f407141a16aff82504871.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a67875615e0d43d96ab8bda58dde066a2cc2a5c8b05211b4b465520690bf206 +size 2537 diff --git a/data/2025/2504_07xxx/2504.07960/images/799d110235e6099c3b897aad35734e877d65549aefaf1c26cba0f9dbc70c90cf.jpg b/data/2025/2504_07xxx/2504.07960/images/799d110235e6099c3b897aad35734e877d65549aefaf1c26cba0f9dbc70c90cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c7d7177a5e42b82f8cdbf4b1ed520ce4a9c4f2bb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/799d110235e6099c3b897aad35734e877d65549aefaf1c26cba0f9dbc70c90cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f623ae56bc7e3dfef8ca75bcf07febb65b264925ca6f377ed00183820d01eec5 +size 22711 diff --git 
a/data/2025/2504_07xxx/2504.07960/images/7a1fb4d42aae0b909975ed02636653475ab979e238853e1d19e586ee9b214afa.jpg b/data/2025/2504_07xxx/2504.07960/images/7a1fb4d42aae0b909975ed02636653475ab979e238853e1d19e586ee9b214afa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64283276a7d622e9b1864bf6fa85223d8588162f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/7a1fb4d42aae0b909975ed02636653475ab979e238853e1d19e586ee9b214afa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd43b5534b6bfe166ab80f12372d37b60fd601fba5966c3c39f39517a5a167bd +size 4384 diff --git a/data/2025/2504_07xxx/2504.07960/images/7c4061b0dc13b470d7a84ab421c7b751f3730815bec422d66c175f2230d760b1.jpg b/data/2025/2504_07xxx/2504.07960/images/7c4061b0dc13b470d7a84ab421c7b751f3730815bec422d66c175f2230d760b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e53231b07583dbf16834afee292ffee3c6dd41a0 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/7c4061b0dc13b470d7a84ab421c7b751f3730815bec422d66c175f2230d760b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1006f4def6601af60f1c3674474c03aa963b424aac6072ea02452be894a39267 +size 1404 diff --git a/data/2025/2504_07xxx/2504.07960/images/7fb4c0fcd85036ac8c873ead412249e9d545ed172f14473177f6964763a4cab4.jpg b/data/2025/2504_07xxx/2504.07960/images/7fb4c0fcd85036ac8c873ead412249e9d545ed172f14473177f6964763a4cab4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee24430880d73647f200c2155251a34019988147 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/7fb4c0fcd85036ac8c873ead412249e9d545ed172f14473177f6964763a4cab4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00795267a8206d7b0edd582244df0f757a521a22b2a811c0224cdd686c0dea7a +size 6722 diff --git a/data/2025/2504_07xxx/2504.07960/images/7ff91ae5472dbf0e4306622f6e5faf691a809bce8c60153b9d9f3805b3ed45fb.jpg 
b/data/2025/2504_07xxx/2504.07960/images/7ff91ae5472dbf0e4306622f6e5faf691a809bce8c60153b9d9f3805b3ed45fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e395b644eebe511f61176bd34b278603f979d96a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/7ff91ae5472dbf0e4306622f6e5faf691a809bce8c60153b9d9f3805b3ed45fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c436c4e0387186d9be8aa6f2d12f18dde7dae2f6f0888d619e78e85078029023 +size 4200 diff --git a/data/2025/2504_07xxx/2504.07960/images/7ffa1029cfcefa94e3cf5bdaf17569e7ed77473937a42d10f5327d8ba5f3baa4.jpg b/data/2025/2504_07xxx/2504.07960/images/7ffa1029cfcefa94e3cf5bdaf17569e7ed77473937a42d10f5327d8ba5f3baa4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32891bdf24b8820f211a071c26a071e213907cc9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/7ffa1029cfcefa94e3cf5bdaf17569e7ed77473937a42d10f5327d8ba5f3baa4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0febe8753714feae26415022e5d3f52d54bbcb1cb2e898241020123e8628f62 +size 3330 diff --git a/data/2025/2504_07xxx/2504.07960/images/80e71d879c8601d4e472bffb296a35879303834a95416596e2faa6f860bb7464.jpg b/data/2025/2504_07xxx/2504.07960/images/80e71d879c8601d4e472bffb296a35879303834a95416596e2faa6f860bb7464.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a252c1dc9519c4b147e22aa059236e46b401f512 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/80e71d879c8601d4e472bffb296a35879303834a95416596e2faa6f860bb7464.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9293afec079106020b15284f430c58bc537ed6432dc4adb5d980fb5d7678ee4b +size 3352 diff --git a/data/2025/2504_07xxx/2504.07960/images/82142d68ddec7246b19ed5b8d35074aabe6ba13595fe20130c05a1bb064bb661.jpg b/data/2025/2504_07xxx/2504.07960/images/82142d68ddec7246b19ed5b8d35074aabe6ba13595fe20130c05a1bb064bb661.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..72ca9b0d149952ab8345fa5263086544b914c296 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/82142d68ddec7246b19ed5b8d35074aabe6ba13595fe20130c05a1bb064bb661.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48173c6a97da4278e317b91f9bf8a5f8965f601ae1e16de36f305f495e03e5d5 +size 2748 diff --git a/data/2025/2504_07xxx/2504.07960/images/84a4b54e00041364199bdc1376b9bbd24b486ed080d34169fc1d3f7c10d42653.jpg b/data/2025/2504_07xxx/2504.07960/images/84a4b54e00041364199bdc1376b9bbd24b486ed080d34169fc1d3f7c10d42653.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21696d1417c2535b686fa38fe81f3360e1c89efd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/84a4b54e00041364199bdc1376b9bbd24b486ed080d34169fc1d3f7c10d42653.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65c1f281029a4c3b2cf28cec978f30d71409939c40736dbb4cdd423042449b1f +size 1420 diff --git a/data/2025/2504_07xxx/2504.07960/images/85382c353d3a3f90395a83ff0e3ae47e130ef00c2047f726cee366e12d0254f9.jpg b/data/2025/2504_07xxx/2504.07960/images/85382c353d3a3f90395a83ff0e3ae47e130ef00c2047f726cee366e12d0254f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20270679422c91510f72d5d94758eb557af3c6f0 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/85382c353d3a3f90395a83ff0e3ae47e130ef00c2047f726cee366e12d0254f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:927510061c470e1fbd9e4b7f693e5e16bb7eccbeb827f796a3f2f6f2e8bb347b +size 4799 diff --git a/data/2025/2504_07xxx/2504.07960/images/863f9581ed491674421cbc2abe5eca4cd5538d9abd21ab564db8018f6735de92.jpg b/data/2025/2504_07xxx/2504.07960/images/863f9581ed491674421cbc2abe5eca4cd5538d9abd21ab564db8018f6735de92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2aff31fcbaead6dc0770d84c832e9050064f8059 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07960/images/863f9581ed491674421cbc2abe5eca4cd5538d9abd21ab564db8018f6735de92.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ea98969976d261489b907e7e7f8fe8213a4e8f7c16e688410f342a66fadbc0e +size 8196 diff --git a/data/2025/2504_07xxx/2504.07960/images/865ed759eb5d007006a5967c548a618725dba0bc159ec85228032aa3ce5813b0.jpg b/data/2025/2504_07xxx/2504.07960/images/865ed759eb5d007006a5967c548a618725dba0bc159ec85228032aa3ce5813b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d6d45d45c9a0c21685d9c9733bed205cba70cf8c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/865ed759eb5d007006a5967c548a618725dba0bc159ec85228032aa3ce5813b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b23e0a7a2fc9cd616540c0b42f472edcd861113f6cd70844f70a2d8bb52fbf4f +size 3226 diff --git a/data/2025/2504_07xxx/2504.07960/images/8748534db01d157266d73470cc0bf47cd9889217f45f4307fea447a43120e8f8.jpg b/data/2025/2504_07xxx/2504.07960/images/8748534db01d157266d73470cc0bf47cd9889217f45f4307fea447a43120e8f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d08fad1dd95f03ded15365c59eb3017a242f945 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/8748534db01d157266d73470cc0bf47cd9889217f45f4307fea447a43120e8f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:022438340a4dc0310ef7fc72761665b9f98bbc7cf5ed83951fb4d229f8bd13f5 +size 14876 diff --git a/data/2025/2504_07xxx/2504.07960/images/87975a69a7cfa15925ae1c6c60a37382b5ca478a877f5b0170fd190aa93a84c6.jpg b/data/2025/2504_07xxx/2504.07960/images/87975a69a7cfa15925ae1c6c60a37382b5ca478a877f5b0170fd190aa93a84c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e5c7a76bb7ce38791bd33560d62bed12348ce5f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/87975a69a7cfa15925ae1c6c60a37382b5ca478a877f5b0170fd190aa93a84c6.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:4f088f83a092c2da2361c481f0955abace0bdb08f2c5e9080f6b7da0b0514ba7 +size 2544 diff --git a/data/2025/2504_07xxx/2504.07960/images/89478554e2624c03002c38dc0b0b3797005ab2fb58fff9224ad2b02c4a50e563.jpg b/data/2025/2504_07xxx/2504.07960/images/89478554e2624c03002c38dc0b0b3797005ab2fb58fff9224ad2b02c4a50e563.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50b4a207994fed784fc8f5d721787cdca365b9e7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/89478554e2624c03002c38dc0b0b3797005ab2fb58fff9224ad2b02c4a50e563.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:165562621cfd10b4006dbdd37bc4b0fc07d61e077340c5f44dc0380c885c7f03 +size 18972 diff --git a/data/2025/2504_07xxx/2504.07960/images/89597e91d21602ff6a99bb7a814a9b6a7ae72aa4a4b9109d0daebd79103fa3bf.jpg b/data/2025/2504_07xxx/2504.07960/images/89597e91d21602ff6a99bb7a814a9b6a7ae72aa4a4b9109d0daebd79103fa3bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f135aab6c9784cbe2776939a6aeb34f2c2fdae86 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/89597e91d21602ff6a99bb7a814a9b6a7ae72aa4a4b9109d0daebd79103fa3bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b65ff8bd85e5e1d4387dbcbcf1f6d035caf1adf2e85bf0985b5e831b5692c0ba +size 4212 diff --git a/data/2025/2504_07xxx/2504.07960/images/8a4ca149908f1c3dc7ae4543913cbbac86677dfbc0b3e6595b8b3db7216b1fb3.jpg b/data/2025/2504_07xxx/2504.07960/images/8a4ca149908f1c3dc7ae4543913cbbac86677dfbc0b3e6595b8b3db7216b1fb3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2862195d11259c72f20e491a953d088da619585e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/8a4ca149908f1c3dc7ae4543913cbbac86677dfbc0b3e6595b8b3db7216b1fb3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c60b0429ac7b11a13d909334c097beee052f592472523829e444a80eee1addc +size 2601 diff --git 
a/data/2025/2504_07xxx/2504.07960/images/8b4fc23f069f0c170232eae403f1a21e6a887e53a17c4491126b10f013255ff1.jpg b/data/2025/2504_07xxx/2504.07960/images/8b4fc23f069f0c170232eae403f1a21e6a887e53a17c4491126b10f013255ff1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b1e93b07aa28682345267c070b80995e5e23bd6e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/8b4fc23f069f0c170232eae403f1a21e6a887e53a17c4491126b10f013255ff1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dda56bf0c711251b62f9803a98e9740967ef6d7c9530908be8a8253b706d826 +size 6069 diff --git a/data/2025/2504_07xxx/2504.07960/images/8cee65b550f76526a0ec36b3d610f2bca88bd275c26b1b73a5aa107579965a84.jpg b/data/2025/2504_07xxx/2504.07960/images/8cee65b550f76526a0ec36b3d610f2bca88bd275c26b1b73a5aa107579965a84.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cab3b4727b0fb51275e4cc900bbf33512238fec3 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/8cee65b550f76526a0ec36b3d610f2bca88bd275c26b1b73a5aa107579965a84.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ab3863d751436e1ecf756c14f0ceff75dbe1ae960ac7364379ba66ebe5dfa22 +size 2560 diff --git a/data/2025/2504_07xxx/2504.07960/images/8f28dcad99cf5e02fe353236076009c397e7bdd7f66c8b64768a20abe40ff522.jpg b/data/2025/2504_07xxx/2504.07960/images/8f28dcad99cf5e02fe353236076009c397e7bdd7f66c8b64768a20abe40ff522.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ebdd9ff64d1eca1d08b5c2c925819ab7acfe0353 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/8f28dcad99cf5e02fe353236076009c397e7bdd7f66c8b64768a20abe40ff522.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b096ca394039cb89943bb49186514c51deac09236ee1836f43b4ec403ca32f87 +size 6356 diff --git a/data/2025/2504_07xxx/2504.07960/images/8f626ef355e0acb77913acad04167e89c0c0c4c5f84de1f1c391827b8f9846db.jpg 
b/data/2025/2504_07xxx/2504.07960/images/8f626ef355e0acb77913acad04167e89c0c0c4c5f84de1f1c391827b8f9846db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76cfa4937ee830be15658618f2f5da621c35b6c2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/8f626ef355e0acb77913acad04167e89c0c0c4c5f84de1f1c391827b8f9846db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42fd172b1656325f191b8d795461b5ab6675b518beb614a78ad6c4b42e539723 +size 19041 diff --git a/data/2025/2504_07xxx/2504.07960/images/93b3d7c15a34d4cea9903e6ba78d973fed980a709027e508c9d31569037fbc3e.jpg b/data/2025/2504_07xxx/2504.07960/images/93b3d7c15a34d4cea9903e6ba78d973fed980a709027e508c9d31569037fbc3e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb20f53b3a6e9344f18fa44fb42529e8047cd6ad --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/93b3d7c15a34d4cea9903e6ba78d973fed980a709027e508c9d31569037fbc3e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9688800abae5f98e2b07b9cb00060cd6d9c8408f547514908d9c2b20dc1c1919 +size 8677 diff --git a/data/2025/2504_07xxx/2504.07960/images/93f401a3ae1b820383a40ed9b15bea2bef7e24e3bda244669a1a79af432a5fd0.jpg b/data/2025/2504_07xxx/2504.07960/images/93f401a3ae1b820383a40ed9b15bea2bef7e24e3bda244669a1a79af432a5fd0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e652ea7662a5b24723010db1750159e819396bc --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/93f401a3ae1b820383a40ed9b15bea2bef7e24e3bda244669a1a79af432a5fd0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2359d4e1a9a40fe0e81580ecbaaf44c97eaee823b2c5c675d31ac0930958d593 +size 4392 diff --git a/data/2025/2504_07xxx/2504.07960/images/940a7dcba50c975c76e446ee20715be0c4ab77ed609eace49e1313f4b4f2fac5.jpg b/data/2025/2504_07xxx/2504.07960/images/940a7dcba50c975c76e446ee20715be0c4ab77ed609eace49e1313f4b4f2fac5.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..c72663dd4408a6b8eac37d0305942eebb28efe46 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/940a7dcba50c975c76e446ee20715be0c4ab77ed609eace49e1313f4b4f2fac5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:544c74f79f9dc6f8bff677a7223163d1f15c2387367be2d84fa2208198ae1e8c +size 6324 diff --git a/data/2025/2504_07xxx/2504.07960/images/980324990f838bb09af21c545c77b6a3c430fbecb9f7cb3a9d273e98971dd01f.jpg b/data/2025/2504_07xxx/2504.07960/images/980324990f838bb09af21c545c77b6a3c430fbecb9f7cb3a9d273e98971dd01f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acec03cec2d9c34c9d60811563096ef4ec949538 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/980324990f838bb09af21c545c77b6a3c430fbecb9f7cb3a9d273e98971dd01f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5edb5d9f1e0ce3baf8e897b767e4f2c18bf59a1b78ae114ecc1e28f96a7f1785 +size 20380 diff --git a/data/2025/2504_07xxx/2504.07960/images/986db1fa825df456e31cd4bee2b6f5fb6fccf2702cc504818830a6dd0e420542.jpg b/data/2025/2504_07xxx/2504.07960/images/986db1fa825df456e31cd4bee2b6f5fb6fccf2702cc504818830a6dd0e420542.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f36e62f90ced329a94be7104486d2f5ef3fad74 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/986db1fa825df456e31cd4bee2b6f5fb6fccf2702cc504818830a6dd0e420542.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5299382d78985b26015f017e55fe46c33d64f32c5510e18a1a753fc33f960a7 +size 3144 diff --git a/data/2025/2504_07xxx/2504.07960/images/987f48cc4fce95f082ca19623a70e2a6cdfc2c6aaacc2e358a28b72a09978326.jpg b/data/2025/2504_07xxx/2504.07960/images/987f48cc4fce95f082ca19623a70e2a6cdfc2c6aaacc2e358a28b72a09978326.jpg new file mode 100644 index 0000000000000000000000000000000000000000..81d8763f5f3bed39776e83f5d6e6fd9520995785 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07960/images/987f48cc4fce95f082ca19623a70e2a6cdfc2c6aaacc2e358a28b72a09978326.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7089d1ba06579971d244d088e9bc5dd32f9796a6049ae02fd586a0a52b30a9bb +size 32135 diff --git a/data/2025/2504_07xxx/2504.07960/images/9b0e95ce6b5f659443d087b5d3ecac99c7d060cbfb474e9fdbe6bee5a539dfa4.jpg b/data/2025/2504_07xxx/2504.07960/images/9b0e95ce6b5f659443d087b5d3ecac99c7d060cbfb474e9fdbe6bee5a539dfa4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0ccc5af2cab7c11a8fdcdbd239002be173dbe65 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/9b0e95ce6b5f659443d087b5d3ecac99c7d060cbfb474e9fdbe6bee5a539dfa4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:543d6bf060c85c85728b92bf419c6df467c34af02e3909ca3f4a04a78c30959a +size 3180 diff --git a/data/2025/2504_07xxx/2504.07960/images/9da1aab1b366244e777d1117796b8fce76b9d73519a64f26cd2fc9dbeab6ff3b.jpg b/data/2025/2504_07xxx/2504.07960/images/9da1aab1b366244e777d1117796b8fce76b9d73519a64f26cd2fc9dbeab6ff3b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eac9c1db707355405d30cf5cc208c28d72ac765e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/9da1aab1b366244e777d1117796b8fce76b9d73519a64f26cd2fc9dbeab6ff3b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0027ecdf0bdf72068f2a0b18d20a65a21fd61c775b44184472cc623be8a83943 +size 4003 diff --git a/data/2025/2504_07xxx/2504.07960/images/9dcabbfd8c75b0c6cdadcff4e12d7bd5baaef8cd00fb64ea0e774437ac5110f8.jpg b/data/2025/2504_07xxx/2504.07960/images/9dcabbfd8c75b0c6cdadcff4e12d7bd5baaef8cd00fb64ea0e774437ac5110f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15fe6e64cd93dabd8efd16091069d80a547e3713 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/9dcabbfd8c75b0c6cdadcff4e12d7bd5baaef8cd00fb64ea0e774437ac5110f8.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7f23003bea7f2d9daee96d0de59ed57525c7ac1a9e0aebd45cd858b66e424a66 +size 7871 diff --git a/data/2025/2504_07xxx/2504.07960/images/a0b9804799dd95f5f5a5f8a400d728989ad5e77cd0039295b88a70967f9e46bb.jpg b/data/2025/2504_07xxx/2504.07960/images/a0b9804799dd95f5f5a5f8a400d728989ad5e77cd0039295b88a70967f9e46bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ae52d3bc638abbde203c2ac995ecf8675718a6f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/a0b9804799dd95f5f5a5f8a400d728989ad5e77cd0039295b88a70967f9e46bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:362dd6462a4b4686301982938a7804c486794475c1d751e32927794769ff804b +size 3009 diff --git a/data/2025/2504_07xxx/2504.07960/images/a120bae33646642c1ee693c6f60b898e6ee0090f7d5c6d025c78f8129c24495c.jpg b/data/2025/2504_07xxx/2504.07960/images/a120bae33646642c1ee693c6f60b898e6ee0090f7d5c6d025c78f8129c24495c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85a9816d41e7d0210274d601aaa90533419afc70 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/a120bae33646642c1ee693c6f60b898e6ee0090f7d5c6d025c78f8129c24495c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4beafe2abad0427122bbaf69f391c1e194787d6fd1613a88f9fb5bcb320ff78b +size 19162 diff --git a/data/2025/2504_07xxx/2504.07960/images/a3d07fda5632ede382d0cef080fcaa8eead3e5397ac707336b1f1b0e9833199d.jpg b/data/2025/2504_07xxx/2504.07960/images/a3d07fda5632ede382d0cef080fcaa8eead3e5397ac707336b1f1b0e9833199d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65e51e5860b7b479d94ce5e73d6576ef81643f03 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/a3d07fda5632ede382d0cef080fcaa8eead3e5397ac707336b1f1b0e9833199d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53c6132ac223994e29ed3c87f0678507fac384dbe63946658250becdbf448525 +size 33234 diff --git 
a/data/2025/2504_07xxx/2504.07960/images/a79b31b137b8ac5a139b3dc80326a763e395c05cbeaf2c1fbf7f0ab7a6c5642a.jpg b/data/2025/2504_07xxx/2504.07960/images/a79b31b137b8ac5a139b3dc80326a763e395c05cbeaf2c1fbf7f0ab7a6c5642a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13810fa212e52b828b4813acfb59464982ddb4d7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/a79b31b137b8ac5a139b3dc80326a763e395c05cbeaf2c1fbf7f0ab7a6c5642a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb84f1e6aa6c7c63835ffe1492e9c139e2e5f7f1b5b617e1f1c6d5cb79a753a9 +size 1022 diff --git a/data/2025/2504_07xxx/2504.07960/images/a8cebf66282c6a6d1bb8700a95d9f356e6055f9f9410deb217518b384a3a6b78.jpg b/data/2025/2504_07xxx/2504.07960/images/a8cebf66282c6a6d1bb8700a95d9f356e6055f9f9410deb217518b384a3a6b78.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb327b7db6001e3078f056328c9de77b89d277fb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/a8cebf66282c6a6d1bb8700a95d9f356e6055f9f9410deb217518b384a3a6b78.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9aa929241b5faeaaf2cedfec35be464a994a504546b68aaba345a0ef4ccb1848 +size 31283 diff --git a/data/2025/2504_07xxx/2504.07960/images/a93592ea83277e4116662db6789d092b4506ba785c49b5ef69f3ece170981951.jpg b/data/2025/2504_07xxx/2504.07960/images/a93592ea83277e4116662db6789d092b4506ba785c49b5ef69f3ece170981951.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e60dd92bd0e725a4580f6a88f986df3bf49d1b30 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/a93592ea83277e4116662db6789d092b4506ba785c49b5ef69f3ece170981951.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cf2cb1a086b209d8ea5892018a48affdfc5b949001c346260325e8fe687a8d3 +size 2473 diff --git a/data/2025/2504_07xxx/2504.07960/images/aaf2c1562237f5ddb14c3eed6b9f102fd9b427dd8f8888b74c7e52557e1dd7fe.jpg 
b/data/2025/2504_07xxx/2504.07960/images/aaf2c1562237f5ddb14c3eed6b9f102fd9b427dd8f8888b74c7e52557e1dd7fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f64e14530cf2b383f60f1a9f67502bb22ae443b8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/aaf2c1562237f5ddb14c3eed6b9f102fd9b427dd8f8888b74c7e52557e1dd7fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c32ab51197fd99064855b2815bff91b62d7a8077643874b6cc9840cae25701b5 +size 4513 diff --git a/data/2025/2504_07xxx/2504.07960/images/aba5d586ab85b4c732671dccc30e8c30f804cf94b08115818770277e80fdc808.jpg b/data/2025/2504_07xxx/2504.07960/images/aba5d586ab85b4c732671dccc30e8c30f804cf94b08115818770277e80fdc808.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79b6e456bc1b50a340d4052aafb2b118765a6bd5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/aba5d586ab85b4c732671dccc30e8c30f804cf94b08115818770277e80fdc808.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03fc787845176f530fdc49792445289a623a4e671ef4d54fd8bed96ac91c6d6b +size 9125 diff --git a/data/2025/2504_07xxx/2504.07960/images/ae71def7d7afd6993f212b48147ff76d77f6b67ba677957013ffa307143b088f.jpg b/data/2025/2504_07xxx/2504.07960/images/ae71def7d7afd6993f212b48147ff76d77f6b67ba677957013ffa307143b088f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0729e88cb46533795b378f8673ecfa98ea004050 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/ae71def7d7afd6993f212b48147ff76d77f6b67ba677957013ffa307143b088f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abbcdc1a2f5f5e84d1a209b1342d1e0b67d1b77c890deb23a0034090aaf85500 +size 2697 diff --git a/data/2025/2504_07xxx/2504.07960/images/aec23ed35e291af8c7df29434c10d984b9fadaf5bc6ff18946ad57d77dc73d24.jpg b/data/2025/2504_07xxx/2504.07960/images/aec23ed35e291af8c7df29434c10d984b9fadaf5bc6ff18946ad57d77dc73d24.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..9a0f04d2812b902c23ab733520e696690a571df5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/aec23ed35e291af8c7df29434c10d984b9fadaf5bc6ff18946ad57d77dc73d24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d0d231bc9419cd05e78d93c7e8160986c72c27fcb308cb5f6d0f5707f8604c7 +size 5099 diff --git a/data/2025/2504_07xxx/2504.07960/images/b28958482ff95ad335836742e9a81e9645dc5376540a879f9995da4107d93fc1.jpg b/data/2025/2504_07xxx/2504.07960/images/b28958482ff95ad335836742e9a81e9645dc5376540a879f9995da4107d93fc1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3de7a197420aa0dfe0f641ca0712262553fa043d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/b28958482ff95ad335836742e9a81e9645dc5376540a879f9995da4107d93fc1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad14cf918eeb3a7f244735bba2d531c27b30043b030dc3f30860ea67be13c86d +size 5293 diff --git a/data/2025/2504_07xxx/2504.07960/images/b956840f0c8e329182c2355da90915a90cc86e1132b6cc30aead6bf424475919.jpg b/data/2025/2504_07xxx/2504.07960/images/b956840f0c8e329182c2355da90915a90cc86e1132b6cc30aead6bf424475919.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e27e0602d55dc1d19fba8c317f3e329925623db --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/b956840f0c8e329182c2355da90915a90cc86e1132b6cc30aead6bf424475919.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ce640d48a28a0fae12526b6e012b6f6ee96db40d035706c4f800449382d8840 +size 5127 diff --git a/data/2025/2504_07xxx/2504.07960/images/b9e915ffc563ac1292d609bb50db8a3d661d89781b64ad46406c4a4066ae6ec0.jpg b/data/2025/2504_07xxx/2504.07960/images/b9e915ffc563ac1292d609bb50db8a3d661d89781b64ad46406c4a4066ae6ec0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfaeea569d8805b68f1400eeb6ff78153add2bcd --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07960/images/b9e915ffc563ac1292d609bb50db8a3d661d89781b64ad46406c4a4066ae6ec0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e7e903b3096fd906d6ac9a471696d1fd5d7760eca20c3f21538ac8d30e89f37 +size 5447 diff --git a/data/2025/2504_07xxx/2504.07960/images/bbde4e344c92cdffbb5d6d627f513b6db53c242132d666b089c56062905f8f21.jpg b/data/2025/2504_07xxx/2504.07960/images/bbde4e344c92cdffbb5d6d627f513b6db53c242132d666b089c56062905f8f21.jpg new file mode 100644 index 0000000000000000000000000000000000000000..074e5d5628fcafbe3a1abf98548d90d4af91d452 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/bbde4e344c92cdffbb5d6d627f513b6db53c242132d666b089c56062905f8f21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d6622083974eccb9f1b87f44dab02020a6c3dd2a639a07d8768663055add466 +size 2492 diff --git a/data/2025/2504_07xxx/2504.07960/images/bd0b98157e40f50c61a930bc7c3484210044f7bb4acfee3766b5f1e2a62ce842.jpg b/data/2025/2504_07xxx/2504.07960/images/bd0b98157e40f50c61a930bc7c3484210044f7bb4acfee3766b5f1e2a62ce842.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5dd4ea1ce7b148136ad35ee75515caed6cdc0b9b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/bd0b98157e40f50c61a930bc7c3484210044f7bb4acfee3766b5f1e2a62ce842.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e9da361ddc16b4f20f43068205ee682c71fbf1c3dcdad197aceff08ef7ed3dc +size 6708 diff --git a/data/2025/2504_07xxx/2504.07960/images/c3a5f4d20b1af78021b3d3cd67f5d643151115213ced5cfbaf30a97185d7c53f.jpg b/data/2025/2504_07xxx/2504.07960/images/c3a5f4d20b1af78021b3d3cd67f5d643151115213ced5cfbaf30a97185d7c53f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..86e02b551a98cf768d405912306f14a8e1e5df57 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/c3a5f4d20b1af78021b3d3cd67f5d643151115213ced5cfbaf30a97185d7c53f.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0d743ea6087c6b4e969bd59f653bbe40e2e0ef433177484c3d7ba1925d22e4a5 +size 6044 diff --git a/data/2025/2504_07xxx/2504.07960/images/c506e86c12a0d7f207718f07d5028554f851e083963db87677d21e69d797c3e4.jpg b/data/2025/2504_07xxx/2504.07960/images/c506e86c12a0d7f207718f07d5028554f851e083963db87677d21e69d797c3e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98076aebf434ac874b555cd921386bb3ac01be95 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/c506e86c12a0d7f207718f07d5028554f851e083963db87677d21e69d797c3e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a51bd8016f1d412b29a5db574bebdca906933d78b483b701d90b6b3e7a5857d4 +size 3709 diff --git a/data/2025/2504_07xxx/2504.07960/images/c8add268e59b84fb865426faadb73b0ea791f4d3904ae87c861f13ec35a054e5.jpg b/data/2025/2504_07xxx/2504.07960/images/c8add268e59b84fb865426faadb73b0ea791f4d3904ae87c861f13ec35a054e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aab541146cab65be04b4882531b37ef67478af71 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/c8add268e59b84fb865426faadb73b0ea791f4d3904ae87c861f13ec35a054e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aecca286d5ba7f8b8f4a74b44ac2a6301fa4486c4c32156c3be8d13cd83a9d1 +size 3711 diff --git a/data/2025/2504_07xxx/2504.07960/images/c9cbc9cc46f803711a2f5087bf774fbaf581c69dbcd8e5093a0df31bdda91ddd.jpg b/data/2025/2504_07xxx/2504.07960/images/c9cbc9cc46f803711a2f5087bf774fbaf581c69dbcd8e5093a0df31bdda91ddd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f6c6bbad29ff0077d40f7c26b904a79642d34ed --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/c9cbc9cc46f803711a2f5087bf774fbaf581c69dbcd8e5093a0df31bdda91ddd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:130952792fc8fdf18a57ea193c95dbe333c0be4288ffdde7ac7ddee0cb577076 +size 4985 diff --git 
a/data/2025/2504_07xxx/2504.07960/images/d354e8d0d32131643b1cced4782295a381b1ae6609006c1e868af1fd71828515.jpg b/data/2025/2504_07xxx/2504.07960/images/d354e8d0d32131643b1cced4782295a381b1ae6609006c1e868af1fd71828515.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f33edb45c92327ff60c2b21114b75394766e4f12 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/d354e8d0d32131643b1cced4782295a381b1ae6609006c1e868af1fd71828515.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f34761f502eb98cb4b55260e24fe0b86d4c1f7fee4cea14adcc9efda24e763bb +size 9655 diff --git a/data/2025/2504_07xxx/2504.07960/images/d394f8d589edbf416c6d4cc2f58660a637f157bfe6a4370510efd32b80073ad3.jpg b/data/2025/2504_07xxx/2504.07960/images/d394f8d589edbf416c6d4cc2f58660a637f157bfe6a4370510efd32b80073ad3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccc0cdd50537f03f40b06e0e2da59da072277759 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/d394f8d589edbf416c6d4cc2f58660a637f157bfe6a4370510efd32b80073ad3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05749873eca159f0588c1292e53d192a1a0151803f6ecfc2634640c370e6732c +size 7221 diff --git a/data/2025/2504_07xxx/2504.07960/images/d9d6747f04ff9c2858bf3fdc29d545b472e6d70bd11ff524bc2478f78a4d043e.jpg b/data/2025/2504_07xxx/2504.07960/images/d9d6747f04ff9c2858bf3fdc29d545b472e6d70bd11ff524bc2478f78a4d043e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f510b72132eb364b30625b8d94951e8238cc56b4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/d9d6747f04ff9c2858bf3fdc29d545b472e6d70bd11ff524bc2478f78a4d043e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5745055a8b7286c4aae53dd54937700fe091fb8b89e77c92b850fbf63549da6 +size 10060 diff --git a/data/2025/2504_07xxx/2504.07960/images/db55729e9d4318e9841efd8e93703898949c599ced5c6934a32bdbe22fd9345e.jpg 
b/data/2025/2504_07xxx/2504.07960/images/db55729e9d4318e9841efd8e93703898949c599ced5c6934a32bdbe22fd9345e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22baf1fd3f0c07dfa1157e3fe7bd229fea3bca8f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/db55729e9d4318e9841efd8e93703898949c599ced5c6934a32bdbe22fd9345e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b12ee6a80e0db6a08a01083bff80315be030f0a9db8f7dcacc18a6cb57a910e +size 11540 diff --git a/data/2025/2504_07xxx/2504.07960/images/de40888ebddd978640967ce3e0a2098836df789420975c6582e6a62980f7cb49.jpg b/data/2025/2504_07xxx/2504.07960/images/de40888ebddd978640967ce3e0a2098836df789420975c6582e6a62980f7cb49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50e8e9828d6d5e3aa31893d5c26dcd06589abd18 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/de40888ebddd978640967ce3e0a2098836df789420975c6582e6a62980f7cb49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cedf36542470b7be17eb5f36b1617bcb2260f188edc8cd5729a85dfe2e9b563 +size 2446 diff --git a/data/2025/2504_07xxx/2504.07960/images/de94d01ad7f947dfba9731be7dcb4d5870a68b125e991e5c8ac5a7a4d8c2806a.jpg b/data/2025/2504_07xxx/2504.07960/images/de94d01ad7f947dfba9731be7dcb4d5870a68b125e991e5c8ac5a7a4d8c2806a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..edfb1b8a3b2570033257cc4804e2a5fb05c042cb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/de94d01ad7f947dfba9731be7dcb4d5870a68b125e991e5c8ac5a7a4d8c2806a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b8a12d54669bd5c88df0e986d4be4ec7c4a1cefcb7af82c753528f35e5eb86e +size 10402 diff --git a/data/2025/2504_07xxx/2504.07960/images/df8c1d05aa538cfaab171fbf8a43d859274becad33105a818a882f24e87d6ac4.jpg b/data/2025/2504_07xxx/2504.07960/images/df8c1d05aa538cfaab171fbf8a43d859274becad33105a818a882f24e87d6ac4.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..68a2c28c1739a9f6f209c7b38dbc170eaf195468 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/df8c1d05aa538cfaab171fbf8a43d859274becad33105a818a882f24e87d6ac4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e40e7ee7737bfdc601bb82eaac100afce2a6b3f6b89e788f9bced3a9b4b2ba8 +size 2457 diff --git a/data/2025/2504_07xxx/2504.07960/images/e26d101199eaf66301f550024e7b7339d17cc4b47263a479a79c314e8bd86ac4.jpg b/data/2025/2504_07xxx/2504.07960/images/e26d101199eaf66301f550024e7b7339d17cc4b47263a479a79c314e8bd86ac4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4c2c9772a702f92372df57f7cceda6d85fa5c2e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/e26d101199eaf66301f550024e7b7339d17cc4b47263a479a79c314e8bd86ac4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba1cc56011afc0abc1e367699a042521276f8133628afbe87e62d65ca8efc306 +size 19625 diff --git a/data/2025/2504_07xxx/2504.07960/images/e39dbb03405300be8ff7301a386eae5438f35df95ee9616ca7db3a89cd56f42c.jpg b/data/2025/2504_07xxx/2504.07960/images/e39dbb03405300be8ff7301a386eae5438f35df95ee9616ca7db3a89cd56f42c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a70f0cf71cbae066678faedf3fdd1e7e6faeee2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/e39dbb03405300be8ff7301a386eae5438f35df95ee9616ca7db3a89cd56f42c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc75c9e9f19b0f39f18991561ac307891ccfcbba979c8a951c957a2a2c556d71 +size 3278 diff --git a/data/2025/2504_07xxx/2504.07960/images/e7728427762ce951e655fcb0171f69c18b5b831a8664b2f78df2e160c0273f98.jpg b/data/2025/2504_07xxx/2504.07960/images/e7728427762ce951e655fcb0171f69c18b5b831a8664b2f78df2e160c0273f98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..543be0dd59d0274465a948ee70547b88af9a1fa1 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07960/images/e7728427762ce951e655fcb0171f69c18b5b831a8664b2f78df2e160c0273f98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51661f455087f968f7f5f681eecff64a7452948562118c215ba6ee31c018d3c9 +size 9663 diff --git a/data/2025/2504_07xxx/2504.07960/images/e99f16afe43814fda782234d7d661493d65d0f8193c42181a2fb8646da77dfbb.jpg b/data/2025/2504_07xxx/2504.07960/images/e99f16afe43814fda782234d7d661493d65d0f8193c42181a2fb8646da77dfbb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..55bfc2047a9c18e06865177f508a75238a56fcb8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/e99f16afe43814fda782234d7d661493d65d0f8193c42181a2fb8646da77dfbb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad29aee169240f2a51aadf0db0d80a75b0e1a38b8a954420b9832cd4a0b4f59f +size 6371 diff --git a/data/2025/2504_07xxx/2504.07960/images/ec06ee6695d88b0d46b5f25e4cca7be24315691f1a9ddf2b5a2a9ac4308e52f4.jpg b/data/2025/2504_07xxx/2504.07960/images/ec06ee6695d88b0d46b5f25e4cca7be24315691f1a9ddf2b5a2a9ac4308e52f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bbf5b21b5bdca87bb6db8450af90c0ab03613cca --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/ec06ee6695d88b0d46b5f25e4cca7be24315691f1a9ddf2b5a2a9ac4308e52f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49c1e697a6f1644d3c583b64da56a0264d9b1c004855573242e27ead681c8ccc +size 6086 diff --git a/data/2025/2504_07xxx/2504.07960/images/ecbf871ff2a97d743b601e08351ab7eb90c6ec2fea0b8095ee48c514aff5062d.jpg b/data/2025/2504_07xxx/2504.07960/images/ecbf871ff2a97d743b601e08351ab7eb90c6ec2fea0b8095ee48c514aff5062d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7981601ddef10a6f21ccf6c9a64f3af3386d55b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/ecbf871ff2a97d743b601e08351ab7eb90c6ec2fea0b8095ee48c514aff5062d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5aa619d1b63ed4a4f75244cc7d2039d47b1672b570a4d2989640c1e63e8329f5 +size 104721 diff --git a/data/2025/2504_07xxx/2504.07960/images/ed0aac7d61ace1eba9a61c459cf3c93e45083cb784032d709343f6143259f33c.jpg b/data/2025/2504_07xxx/2504.07960/images/ed0aac7d61ace1eba9a61c459cf3c93e45083cb784032d709343f6143259f33c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0027e3bc42b300bfd227919e815be089034deb1b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/ed0aac7d61ace1eba9a61c459cf3c93e45083cb784032d709343f6143259f33c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c4cb79d23578281da43d39949b3740d0b13e3e8df50fe22ca70e8ecb10952e4 +size 3253 diff --git a/data/2025/2504_07xxx/2504.07960/images/ed5f6d948058c554d40aa00b169db869fe0195040f2a10e59f002c94628c2c0a.jpg b/data/2025/2504_07xxx/2504.07960/images/ed5f6d948058c554d40aa00b169db869fe0195040f2a10e59f002c94628c2c0a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6370190c282187ecd55fb1748c06310e7f25766f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/ed5f6d948058c554d40aa00b169db869fe0195040f2a10e59f002c94628c2c0a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b02bc53594f67799f86b942f0a6c95dfeb49d7dd00deb15d94a56b6c24afab21 +size 2551 diff --git a/data/2025/2504_07xxx/2504.07960/images/ee5e5e4dc0f451c0d87afc70fdfb4e5b425f1d04355975da1e325a69e82d76cb.jpg b/data/2025/2504_07xxx/2504.07960/images/ee5e5e4dc0f451c0d87afc70fdfb4e5b425f1d04355975da1e325a69e82d76cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b6377d2a564cd51008c34aa97fe67ed9ce68a94 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/ee5e5e4dc0f451c0d87afc70fdfb4e5b425f1d04355975da1e325a69e82d76cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc8abc3fcd37dd79f239f38ed33f0b99844309915c9cbc7bf740d6a744caea51 +size 4690 diff --git 
a/data/2025/2504_07xxx/2504.07960/images/ef3a55e1c0e0d92f96cdbb5e7bf19885fc97fd1438490113606cf105ff38a722.jpg b/data/2025/2504_07xxx/2504.07960/images/ef3a55e1c0e0d92f96cdbb5e7bf19885fc97fd1438490113606cf105ff38a722.jpg new file mode 100644 index 0000000000000000000000000000000000000000..494039028ab07dc33a769b5346197d4e13c3f0d9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/ef3a55e1c0e0d92f96cdbb5e7bf19885fc97fd1438490113606cf105ff38a722.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb5e4b0bb3589c21a3bec0e77c25bfa838b2f6c6fb4fe06ed1f57e748fde9b65 +size 3050 diff --git a/data/2025/2504_07xxx/2504.07960/images/f1b73d86659bf48d8dddbe2eb30df4180c1a1c124db2fc18321bd544c2857b04.jpg b/data/2025/2504_07xxx/2504.07960/images/f1b73d86659bf48d8dddbe2eb30df4180c1a1c124db2fc18321bd544c2857b04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d7f8418291205405a2c4156423b92d4b0e91d09 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/f1b73d86659bf48d8dddbe2eb30df4180c1a1c124db2fc18321bd544c2857b04.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e710ed0b2f1174063f363487dd1276960d5a26bf9f6c9ba48f6862b519c75de +size 8790 diff --git a/data/2025/2504_07xxx/2504.07960/images/f2d484af116d0c9a2212612f63e0c234e814fabe0c58948af994a5cc1b020c38.jpg b/data/2025/2504_07xxx/2504.07960/images/f2d484af116d0c9a2212612f63e0c234e814fabe0c58948af994a5cc1b020c38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc333da3e2c7150c59cc861f2241a347152aa7ae --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/f2d484af116d0c9a2212612f63e0c234e814fabe0c58948af994a5cc1b020c38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1878bd292f637abe44e1b4a05e495410d5733c2e40dba5c36cc0a1755ec05c70 +size 6174 diff --git a/data/2025/2504_07xxx/2504.07960/images/f76662e5dc81ba0b4ab6e409674dd2e1e3a76aed9e84f2152cce4ee9785b542b.jpg 
b/data/2025/2504_07xxx/2504.07960/images/f76662e5dc81ba0b4ab6e409674dd2e1e3a76aed9e84f2152cce4ee9785b542b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a34abb9c097b4532d67859d2e3488115bb6938f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/f76662e5dc81ba0b4ab6e409674dd2e1e3a76aed9e84f2152cce4ee9785b542b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fe33bdf29a89a5213994dbeccae2dba51c70a105eaa1ddb1b5236756d57c13b +size 5932 diff --git a/data/2025/2504_07xxx/2504.07960/images/f7f813c9d62261dca493ea9cea5ef56da08acae1825f427138e1cb653148b7af.jpg b/data/2025/2504_07xxx/2504.07960/images/f7f813c9d62261dca493ea9cea5ef56da08acae1825f427138e1cb653148b7af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae80321e723e26dc2e6de418e644e59be5c6bc4c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/f7f813c9d62261dca493ea9cea5ef56da08acae1825f427138e1cb653148b7af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ace774087c221ab8a016769ad168d7c02fe391ddcca1ba58d754ebc53f04ccc +size 5002 diff --git a/data/2025/2504_07xxx/2504.07960/images/f9b78ddccec0ccf409f2fc8879fa376edc9d39e8cb08907d81efb9a99c5c6a2c.jpg b/data/2025/2504_07xxx/2504.07960/images/f9b78ddccec0ccf409f2fc8879fa376edc9d39e8cb08907d81efb9a99c5c6a2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a527846a730877c4c2d2b00511aee60b74f91355 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/f9b78ddccec0ccf409f2fc8879fa376edc9d39e8cb08907d81efb9a99c5c6a2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21e631b44d37a105eb50978d86ace5cedfb7d72f17ef9a6ca9f9920acd6a1064 +size 3788 diff --git a/data/2025/2504_07xxx/2504.07960/images/fa356cf4486c8e710d3cd2b102a59b17210dd2dd3bf6f93b711f8c0981a2c386.jpg b/data/2025/2504_07xxx/2504.07960/images/fa356cf4486c8e710d3cd2b102a59b17210dd2dd3bf6f93b711f8c0981a2c386.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..3f2f37bf3013ba3f59830d79288aafa950b3f17c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/fa356cf4486c8e710d3cd2b102a59b17210dd2dd3bf6f93b711f8c0981a2c386.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a5fc4e80bb0fb850669e2849e95f50593c3258c214fb0b458718e09dde2f5ec +size 10145 diff --git a/data/2025/2504_07xxx/2504.07960/images/fb04b2ab9ff17bcfb7fa35d7453094bf8d14956c8fdf0ab406495435650b43b6.jpg b/data/2025/2504_07xxx/2504.07960/images/fb04b2ab9ff17bcfb7fa35d7453094bf8d14956c8fdf0ab406495435650b43b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4323ab49f87ee3908397c384b5b87ce8fa5c9e6b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/fb04b2ab9ff17bcfb7fa35d7453094bf8d14956c8fdf0ab406495435650b43b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e78c76764eca8c20d448d74c1ce690fbdcd439728ad895af666696caa9881c95 +size 3074 diff --git a/data/2025/2504_07xxx/2504.07960/images/fec04428066c5f332887357520d323ebdf63d8418f8810016904644ec989f7e1.jpg b/data/2025/2504_07xxx/2504.07960/images/fec04428066c5f332887357520d323ebdf63d8418f8810016904644ec989f7e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..450967636ec73d040f4b2f0835a5dca6060dab5b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/images/fec04428066c5f332887357520d323ebdf63d8418f8810016904644ec989f7e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f74e140e1504720f301a509fc0a26da60d7e3bb8b2ec309d39288e3983ae777 +size 12068 diff --git a/data/2025/2504_07xxx/2504.07960/images/ff981a1756b7d4668df52429e2d3439d06ab30300cd9a9f1a69f32855e7aac24.jpg b/data/2025/2504_07xxx/2504.07960/images/ff981a1756b7d4668df52429e2d3439d06ab30300cd9a9f1a69f32855e7aac24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fb644e76f589d6faca9ca55d75cee39cc6469cd --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07960/images/ff981a1756b7d4668df52429e2d3439d06ab30300cd9a9f1a69f32855e7aac24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8c3016c7d4d3940311254aac03ea36743a63a317f8b89e528f37a3498f34f30 +size 27127 diff --git a/data/2025/2504_07xxx/2504.07960/layout.json b/data/2025/2504_07xxx/2504.07960/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..18c6b881113af1e6809e1776f7944083bd9261f6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07960/layout.json @@ -0,0 +1,18485 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 132, + 103, + 479, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 103, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 132, + 103, + 479, + 140 + ], + "type": "text", + "content": "VisualCloze: A Universal Image Generation Framework via Visual In-Context Learning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "spans": [ + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "text", + "content": "Zhong-Yu Li" + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{1,4*}" + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "text", + "content": " Ruoyi Du" + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{2,4*}" + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "text", + "content": " Juncheng Yan" + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{3,4}" + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "text", + "content": " Le Zhuo" + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "text", + "content": " Qilong 
Wu" + }, + { + "bbox": [ + 118, + 160, + 492, + 175 + ], + "type": "inline_equation", + "content": "^{4}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "spans": [ + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "type": "text", + "content": "Zhen Li" + }, + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "type": "inline_equation", + "content": "^{5\\dagger}" + }, + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "type": "text", + "content": " Peng Gao" + }, + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "type": "text", + "content": " Zhanyu Ma" + }, + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "type": "text", + "content": " Ming-Ming Cheng" + }, + { + "bbox": [ + 156, + 175, + 456, + 190 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 96, + 189, + 514, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 189, + 514, + 203 + ], + "spans": [ + { + "bbox": [ + 96, + 189, + 514, + 203 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 96, + 189, + 514, + 203 + ], + "type": "text", + "content": "VCIP, CS, Nankai University " + }, + { + "bbox": [ + 96, + 189, + 514, + 203 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 96, + 189, + 514, + 203 + ], + "type": "text", + "content": "Beijing University of Posts and Telecommunications" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 203, + 527, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 203, + 527, + 218 + ], + "spans": [ + { + "bbox": [ + 83, + 203, + 527, + 218 + ], + "type": 
"inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 83, + 203, + 527, + 218 + ], + "type": "text", + "content": "Tsinghua University " + }, + { + "bbox": [ + 83, + 203, + 527, + 218 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 83, + 203, + 527, + 218 + ], + "type": "text", + "content": "Shanghai AI Laboratory " + }, + { + "bbox": [ + 83, + 203, + 527, + 218 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 83, + 203, + 527, + 218 + ], + "type": "text", + "content": "The Chinese University of Hong Kong" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 196, + 218, + 411, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 218, + 411, + 232 + ], + "spans": [ + { + "bbox": [ + 196, + 218, + 411, + 232 + ], + "type": "text", + "content": "Project page: https://visualcloze.github.io" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 242, + 113, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 242, + 113, + 252 + ], + "spans": [ + { + "bbox": [ + 69, + 242, + 113, + 252 + ], + "type": "text", + "content": "Understand" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 78, + 253, + 106, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 253, + 106, + 259 + ], + "spans": [ + { + "bbox": [ + 78, + 253, + 106, + 259 + ], + "type": "text", + "content": "the task" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 106, + 242, + 148, + 253 + ], + "blocks": [ + { + "bbox": [ + 106, + 242, + 148, + 253 + ], + "lines": [ + { + "bbox": [ + 106, + 242, + 148, + 253 + ], + "spans": [ + { + "bbox": [ + 106, + 242, + 148, + 253 + ], + "type": "image", + "image_path": "84a4b54e00041364199bdc1376b9bbd24b486ed080d34169fc1d3f7c10d42653.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 105, + 253, + 148, + 282 + ], 
+ "blocks": [ + { + "bbox": [ + 105, + 253, + 148, + 282 + ], + "lines": [ + { + "bbox": [ + 105, + 253, + 148, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 148, + 282 + ], + "type": "image", + "image_path": "249c5eba9181e8fcc9090710c206e223e70e8c83341bea0710dcd66874769944.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 137, + 243, + 219, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 243, + 219, + 253 + ], + "spans": [ + { + "bbox": [ + 137, + 243, + 219, + 253 + ], + "type": "text", + "content": "In-context examples" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 151, + 253, + 179, + 282 + ], + "blocks": [ + { + "bbox": [ + 151, + 253, + 179, + 282 + ], + "lines": [ + { + "bbox": [ + 151, + 253, + 179, + 282 + ], + "spans": [ + { + "bbox": [ + 151, + 253, + 179, + 282 + ], + "type": "image", + "image_path": "fb04b2ab9ff17bcfb7fa35d7453094bf8d14956c8fdf0ab406495435650b43b6.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 184, + 253, + 240, + 282 + ], + "blocks": [ + { + "bbox": [ + 184, + 253, + 240, + 282 + ], + "lines": [ + { + "bbox": [ + 184, + 253, + 240, + 282 + ], + "spans": [ + { + "bbox": [ + 184, + 253, + 240, + 282 + ], + "type": "image", + "image_path": "4393be7c6f29bc43f54a261a63eaa8c2189646433cf43bedd9b0919cfd199fad.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 100, + 282, + 130, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 282, + 130, + 289 + ], + "spans": [ + { + "bbox": [ + 100, + 282, + 130, + 289 + ], + "type": "text", + "content": "Style +" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 100, + 289, + 130, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 289, + 130, + 
297 + ], + "spans": [ + { + "bbox": [ + 100, + 289, + 130, + 297 + ], + "type": "text", + "content": "Subject =" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 137, + 289, + 149, + 297 + ], + "blocks": [ + { + "bbox": [ + 137, + 289, + 149, + 297 + ], + "lines": [ + { + "bbox": [ + 137, + 289, + 149, + 297 + ], + "spans": [ + { + "bbox": [ + 137, + 289, + 149, + 297 + ], + "type": "image", + "image_path": "75d89f5d8503df369fc39da3705554613610fada9cfa6423518ea4dc32655477.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 149, + 289, + 205, + 319 + ], + "blocks": [ + { + "bbox": [ + 149, + 289, + 205, + 319 + ], + "lines": [ + { + "bbox": [ + 149, + 289, + 205, + 319 + ], + "spans": [ + { + "bbox": [ + 149, + 289, + 205, + 319 + ], + "type": "image", + "image_path": "1698230c67af2bc59f13117325208987cdf1153085e63af2e77e4bcdc8626b1d.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 211, + 289, + 222, + 319 + ], + "blocks": [ + { + "bbox": [ + 211, + 289, + 222, + 319 + ], + "lines": [ + { + "bbox": [ + 211, + 289, + 222, + 319 + ], + "spans": [ + { + "bbox": [ + 211, + 289, + 222, + 319 + ], + "type": "image", + "image_path": "5eb58d3f654a129ad2c34118c2949abb78a7b3313af457599e18176510edc4f3.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 223, + 289, + 255, + 319 + ], + "blocks": [ + { + "bbox": [ + 223, + 289, + 255, + 319 + ], + "lines": [ + { + "bbox": [ + 223, + 289, + 255, + 319 + ], + "spans": [ + { + "bbox": [ + 223, + 289, + 255, + 319 + ], + "type": "image", + "image_path": "2202c6af3c094ac1789278049ef49938d293faf990575f94e378d7aa01bd8828.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ 
+ 257, + 246, + 328, + 304 + ], + "blocks": [ + { + "bbox": [ + 257, + 246, + 328, + 304 + ], + "lines": [ + { + "bbox": [ + 257, + 246, + 328, + 304 + ], + "spans": [ + { + "bbox": [ + 257, + 246, + 328, + 304 + ], + "type": "image", + "image_path": "aba5d586ab85b4c732671dccc30e8c30f804cf94b08115818770277e80fdc808.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 328, + 247, + 395, + 316 + ], + "blocks": [ + { + "bbox": [ + 328, + 247, + 395, + 316 + ], + "lines": [ + { + "bbox": [ + 328, + 247, + 395, + 316 + ], + "spans": [ + { + "bbox": [ + 328, + 247, + 395, + 316 + ], + "type": "image", + "image_path": "c9cbc9cc46f803711a2f5087bf774fbaf581c69dbcd8e5093a0df31bdda91ddd.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 402, + 247, + 471, + 304 + ], + "blocks": [ + { + "bbox": [ + 402, + 247, + 471, + 304 + ], + "lines": [ + { + "bbox": [ + 402, + 247, + 471, + 304 + ], + "spans": [ + { + "bbox": [ + 402, + 247, + 471, + 304 + ], + "type": "image", + "image_path": "bd0b98157e40f50c61a930bc7c3484210044f7bb4acfee3766b5f1e2a62ce842.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 473, + 247, + 541, + 316 + ], + "blocks": [ + { + "bbox": [ + 473, + 247, + 541, + 316 + ], + "lines": [ + { + "bbox": [ + 473, + 247, + 541, + 316 + ], + "spans": [ + { + "bbox": [ + 473, + 247, + 541, + 316 + ], + "type": "image", + "image_path": "7ff91ae5472dbf0e4306622f6e5faf691a809bce8c60153b9d9f3805b3ed45fb.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 271, + 306, + 331, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 271, + 306, + 331, + 313 + ], + "spans": [ + { + "bbox": [ + 271, + 306, + 331, + 313 + ], + "type": 
"text", + "content": "Dense Prediction" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 414, + 306, + 472, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 414, + 306, + 472, + 313 + ], + "spans": [ + { + "bbox": [ + 414, + 306, + 472, + 313 + ], + "type": "text", + "content": "Pose Estimation" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 70, + 349, + 115, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 349, + 115, + 357 + ], + "spans": [ + { + "bbox": [ + 70, + 349, + 115, + 357 + ], + "type": "text", + "content": "Fill blank grid" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 70, + 358, + 116, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 358, + 116, + 366 + ], + "spans": [ + { + "bbox": [ + 70, + 358, + 116, + 366 + ], + "type": "text", + "content": "by reasoning" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 119, + 325, + 194, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 325, + 194, + 335 + ], + "spans": [ + { + "bbox": [ + 119, + 325, + 194, + 335 + ], + "type": "text", + "content": "Visual Prompt" + } + ] + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 119, + 335, + 181, + 365 + ], + "blocks": [ + { + "bbox": [ + 119, + 335, + 181, + 365 + ], + "lines": [ + { + "bbox": [ + 119, + 335, + 181, + 365 + ], + "spans": [ + { + "bbox": [ + 119, + 335, + 181, + 365 + ], + "type": "image", + "image_path": "f7f813c9d62261dca493ea9cea5ef56da08acae1825f427138e1cb653148b7af.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 182, + 335, + 211, + 365 + ], + "blocks": [ + { + "bbox": [ + 182, + 335, + 211, + 365 + ], + "lines": [ + { + "bbox": [ + 182, + 335, + 211, + 365 + ], + "spans": [ + { + "bbox": [ + 182, + 335, + 211, + 365 + ], + "type": "image", + "image_path": 
"092609907fdeb76319a8b135e6864cbfdaf3f44b0f64eee6b2bc9518ecd538c5.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "bbox": [ + 212, + 325, + 239, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 325, + 239, + 334 + ], + "spans": [ + { + "bbox": [ + 212, + 325, + 239, + 334 + ], + "type": "text", + "content": "Target" + } + ] + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 212, + 335, + 227, + 363 + ], + "blocks": [ + { + "bbox": [ + 212, + 335, + 227, + 363 + ], + "lines": [ + { + "bbox": [ + 212, + 335, + 227, + 363 + ], + "spans": [ + { + "bbox": [ + 212, + 335, + 227, + 363 + ], + "type": "image", + "image_path": "a79b31b137b8ac5a139b3dc80326a763e395c05cbeaf2c1fbf7f0ab7a6c5642a.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 227, + 342, + 235, + 355 + ], + "blocks": [ + { + "bbox": [ + 227, + 342, + 235, + 355 + ], + "lines": [ + { + "bbox": [ + 227, + 342, + 235, + 355 + ], + "spans": [ + { + "bbox": [ + 227, + 342, + 235, + 355 + ], + "type": "image", + "image_path": "2db8d9e6ba2c8127992b3a5110e95463524927abf523df959adf98d905d9ac09.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "bbox": [ + 150, + 373, + 240, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 373, + 240, + 383 + ], + "spans": [ + { + "bbox": [ + 150, + 373, + 240, + 383 + ], + "type": "text", + "content": "Extend to diverse tasks" + } + ] + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 257, + 320, + 327, + 377 + ], + "blocks": [ + { + "bbox": [ + 257, + 320, + 327, + 377 + ], + "lines": [ + { + "bbox": [ + 257, + 320, + 327, + 377 + ], + "spans": [ + { + "bbox": [ + 257, + 320, + 327, + 377 + ], + "type": "image", + "image_path": 
"4227aefc2e2bdd775eada89a92f8d6dbb11045e2d36aa517d80ff6fac123a2aa.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "bbox": [ + 266, + 379, + 331, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 379, + 331, + 387 + ], + "spans": [ + { + "bbox": [ + 266, + 379, + 331, + 387 + ], + "type": "text", + "content": "Image Restoration" + } + ] + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 328, + 320, + 395, + 389 + ], + "blocks": [ + { + "bbox": [ + 328, + 320, + 395, + 389 + ], + "lines": [ + { + "bbox": [ + 328, + 320, + 395, + 389 + ], + "spans": [ + { + "bbox": [ + 328, + 320, + 395, + 389 + ], + "type": "image", + "image_path": "863f9581ed491674421cbc2abe5eca4cd5538d9abd21ab564db8018f6735de92.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 402, + 320, + 471, + 376 + ], + "blocks": [ + { + "bbox": [ + 402, + 320, + 471, + 376 + ], + "lines": [ + { + "bbox": [ + 402, + 320, + 471, + 376 + ], + "spans": [ + { + "bbox": [ + 402, + 320, + 471, + 376 + ], + "type": "image", + "image_path": "9dcabbfd8c75b0c6cdadcff4e12d7bd5baaef8cd00fb64ea0e774437ac5110f8.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 473, + 320, + 541, + 389 + ], + "blocks": [ + { + "bbox": [ + 473, + 320, + 541, + 389 + ], + "lines": [ + { + "bbox": [ + 473, + 320, + 541, + 389 + ], + "spans": [ + { + "bbox": [ + 473, + 320, + 541, + 389 + ], + "type": "image", + "image_path": "d9d6747f04ff9c2858bf3fdc29d545b472e6d70bd11ff524bc2478f78a4d043e.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 79, + 396, + 120, + 448 + ], + "blocks": [ + { + "bbox": [ + 79, + 396, + 120, + 448 + ], + "lines": [ + { + "bbox": [ + 79, + 396, + 120, + 
448 + ], + "spans": [ + { + "bbox": [ + 79, + 396, + 120, + 448 + ], + "type": "image", + "image_path": "2592260bdbec76c8a122218f8f01c344087274b81819d1f8c0e434b38c0b1774.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 86, + 449, + 170, + 457 + ], + "lines": [ + { + "bbox": [ + 86, + 449, + 170, + 457 + ], + "spans": [ + { + "bbox": [ + 86, + 449, + 170, + 457 + ], + "type": "text", + "content": "Subject + Layout + Style" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_caption" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 137, + 396, + 202, + 448 + ], + "blocks": [ + { + "bbox": [ + 137, + 396, + 202, + 448 + ], + "lines": [ + { + "bbox": [ + 137, + 396, + 202, + 448 + ], + "spans": [ + { + "bbox": [ + 137, + 396, + 202, + 448 + ], + "type": "image", + "image_path": "940a7dcba50c975c76e446ee20715be0c4ab77ed609eace49e1313f4b4f2fac5.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 448, + 170, + 457 + ], + "lines": [ + { + "bbox": [ + 137, + 448, + 170, + 457 + ], + "spans": [ + { + "bbox": [ + 137, + 448, + 170, + 457 + ], + "type": "text", + "content": "Out + Style" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_caption" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 203, + 396, + 265, + 460 + ], + "blocks": [ + { + "bbox": [ + 203, + 396, + 265, + 460 + ], + "lines": [ + { + "bbox": [ + 203, + 396, + 265, + 460 + ], + "spans": [ + { + "bbox": [ + 203, + 396, + 265, + 460 + ], + "type": "image", + "image_path": "508cbcf5bd841b99b378aea70c2c4cb285132b5cb2e168a31a3cdf249aa1e100.jpg" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_body" + } + ], + "index": 44 + }, + { + "type": "image", + "bbox": [ + 266, + 396, + 331, + 460 + ], + "blocks": [ + { + "bbox": [ + 266, + 396, + 331, + 460 + ], + "lines": [ + { + "bbox": [ + 266, + 396, + 331, + 460 + ], + "spans": [ + { + "bbox": [ + 266, 
+ 396, + 331, + 460 + ], + "type": "image", + "image_path": "24b1c78dac9ec403cbf8363cdcd5e632ebc391d46e27bcb75148fea5ba6868d6.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 348, + 395, + 411, + 448 + ], + "blocks": [ + { + "bbox": [ + 348, + 395, + 411, + 448 + ], + "lines": [ + { + "bbox": [ + 348, + 395, + 411, + 448 + ], + "spans": [ + { + "bbox": [ + 348, + 395, + 411, + 448 + ], + "type": "image", + "image_path": "93f401a3ae1b820383a40ed9b15bea2bef7e24e3bda244669a1a79af432a5fd0.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "bbox": [ + 363, + 449, + 416, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 449, + 416, + 457 + ], + "spans": [ + { + "bbox": [ + 363, + 449, + 416, + 457 + ], + "type": "text", + "content": "Subject-driven" + } + ] + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 411, + 396, + 477, + 448 + ], + "blocks": [ + { + "bbox": [ + 411, + 396, + 477, + 448 + ], + "lines": [ + { + "bbox": [ + 411, + 396, + 477, + 448 + ], + "spans": [ + { + "bbox": [ + 411, + 396, + 477, + 448 + ], + "type": "image", + "image_path": "8b4fc23f069f0c170232eae403f1a21e6a887e53a17c4491126b10f013255ff1.jpg" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 414, + 448, + 471, + 457 + ], + "lines": [ + { + "bbox": [ + 414, + 448, + 471, + 457 + ], + "spans": [ + { + "bbox": [ + 414, + 448, + 471, + 457 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 477, + 448, + 541, + 457 + ], + "lines": [ + { + "bbox": [ + 477, + 448, + 541, + 457 + ], + "spans": [ + { + "bbox": [ + 477, + 448, + 541, + 457 + ], + "type": "inline_equation", + "content": "\\therefore m : x = 1" + }, + { + "bbox": [ + 477, + 448, + 541, + 457 + ], + "type": "text", + 
"content": " 或 " + }, + { + "bbox": [ + 477, + 448, + 541, + 457 + ], + "type": "inline_equation", + "content": "{3x} + {4y} + 1 = 0" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_caption" + } + ], + "index": 48 + }, + { + "type": "image", + "bbox": [ + 477, + 396, + 542, + 448 + ], + "blocks": [ + { + "bbox": [ + 477, + 396, + 542, + 448 + ], + "lines": [ + { + "bbox": [ + 477, + 396, + 542, + 448 + ], + "spans": [ + { + "bbox": [ + 477, + 396, + 542, + 448 + ], + "type": "image", + "image_path": "2227fd4b48343bbc2e5a42bcd226b45f9dc129a4c514bc1ae1388c098343dfc2.jpg" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_body" + } + ], + "index": 50 + }, + { + "type": "image", + "bbox": [ + 78, + 464, + 115, + 523 + ], + "blocks": [ + { + "bbox": [ + 78, + 464, + 115, + 523 + ], + "lines": [ + { + "bbox": [ + 78, + 464, + 115, + 523 + ], + "spans": [ + { + "bbox": [ + 78, + 464, + 115, + 523 + ], + "type": "image", + "image_path": "c8add268e59b84fb865426faadb73b0ea791f4d3904ae87c861f13ec35a054e5.jpg" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_body" + } + ], + "index": 52 + }, + { + "type": "image", + "bbox": [ + 116, + 464, + 162, + 524 + ], + "blocks": [ + { + "bbox": [ + 116, + 464, + 162, + 524 + ], + "lines": [ + { + "bbox": [ + 116, + 464, + 162, + 524 + ], + "spans": [ + { + "bbox": [ + 116, + 464, + 162, + 524 + ], + "type": "image", + "image_path": "aec23ed35e291af8c7df29434c10d984b9fadaf5bc6ff18946ad57d77dc73d24.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + } + ], + "index": 53 + }, + { + "type": "image", + "bbox": [ + 170, + 464, + 212, + 511 + ], + "blocks": [ + { + "bbox": [ + 170, + 464, + 212, + 511 + ], + "lines": [ + { + "bbox": [ + 170, + 464, + 212, + 511 + ], + "spans": [ + { + "bbox": [ + 170, + 464, + 212, + 511 + ], + "type": "image", + "image_path": "aaf2c1562237f5ddb14c3eed6b9f102fd9b427dd8f8888b74c7e52557e1dd7fe.jpg" + } + ] + } + ], + "index": 54, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 54 + }, + { + "type": "image", + "bbox": [ + 214, + 464, + 255, + 513 + ], + "blocks": [ + { + "bbox": [ + 177, + 513, + 214, + 521 + ], + "lines": [ + { + "bbox": [ + 177, + 513, + 214, + 521 + ], + "spans": [ + { + "bbox": [ + 177, + 513, + 214, + 521 + ], + "type": "text", + "content": "Relighting" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 214, + 464, + 255, + 513 + ], + "lines": [ + { + "bbox": [ + 214, + 464, + 255, + 513 + ], + "spans": [ + { + "bbox": [ + 214, + 464, + 255, + 513 + ], + "type": "image", + "image_path": "2e9ee140d2a4597e7fa85f3386ba1a0e2949ce6941fb40534e77123b7ea6392e.jpg" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_body" + } + ], + "index": 56 + }, + { + "type": "image", + "bbox": [ + 263, + 464, + 309, + 512 + ], + "blocks": [ + { + "bbox": [ + 263, + 464, + 309, + 512 + ], + "lines": [ + { + "bbox": [ + 263, + 464, + 309, + 512 + ], + "spans": [ + { + "bbox": [ + 263, + 464, + 309, + 512 + ], + "type": "image", + "image_path": "f9b78ddccec0ccf409f2fc8879fa376edc9d39e8cb08907d81efb9a99c5c6a2c.jpg" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 264, + 513, + 309, + 521 + ], + "lines": [ + { + "bbox": [ + 264, + 513, + 309, + 521 + ], + "spans": [ + { + "bbox": [ + 264, + 513, + 309, + 521 + ], + "type": "text", + "content": "Virtual Try-On" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_caption" + } + ], + "index": 57 + }, + { + "type": "image", + "bbox": [ + 310, + 464, + 351, + 514 + ], + "blocks": [ + { + "bbox": [ + 310, + 464, + 351, + 514 + ], + "lines": [ + { + "bbox": [ + 310, + 464, + 351, + 514 + ], + "spans": [ + { + "bbox": [ + 310, + 464, + 351, + 514 + ], + "type": "image", + "image_path": "ed5f6d948058c554d40aa00b169db869fe0195040f2a10e59f002c94628c2c0a.jpg" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": "image_body" + } + ], + "index": 59 + }, + 
{ + "type": "image", + "bbox": [ + 351, + 464, + 400, + 514 + ], + "blocks": [ + { + "bbox": [ + 351, + 464, + 400, + 514 + ], + "lines": [ + { + "bbox": [ + 351, + 464, + 400, + 514 + ], + "spans": [ + { + "bbox": [ + 351, + 464, + 400, + 514 + ], + "type": "image", + "image_path": "9da1aab1b366244e777d1117796b8fce76b9d73519a64f26cd2fc9dbeab6ff3b.jpg" + } + ] + } + ], + "index": 60, + "angle": 0, + "type": "image_body" + } + ], + "index": 60 + }, + { + "type": "image", + "bbox": [ + 408, + 464, + 450, + 511 + ], + "blocks": [ + { + "bbox": [ + 408, + 464, + 450, + 511 + ], + "lines": [ + { + "bbox": [ + 408, + 464, + 450, + 511 + ], + "spans": [ + { + "bbox": [ + 408, + 464, + 450, + 511 + ], + "type": "image", + "image_path": "c506e86c12a0d7f207718f07d5028554f851e083963db87677d21e69d797c3e4.jpg" + } + ] + } + ], + "index": 61, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 422, + 513, + 472, + 521 + ], + "lines": [ + { + "bbox": [ + 422, + 513, + 472, + 521 + ], + "spans": [ + { + "bbox": [ + 422, + 513, + 472, + 521 + ], + "type": "text", + "content": "Style Transfer" + } + ] + } + ], + "index": 62, + "angle": 0, + "type": "image_caption" + } + ], + "index": 61 + }, + { + "type": "image", + "bbox": [ + 451, + 464, + 496, + 511 + ], + "blocks": [ + { + "bbox": [ + 451, + 464, + 496, + 511 + ], + "lines": [ + { + "bbox": [ + 451, + 464, + 496, + 511 + ], + "spans": [ + { + "bbox": [ + 451, + 464, + 496, + 511 + ], + "type": "image", + "image_path": "b28958482ff95ad335836742e9a81e9645dc5376540a879f9995da4107d93fc1.jpg" + } + ] + } + ], + "index": 63, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 513, + 489, + 520 + ], + "lines": [ + { + "bbox": [ + 452, + 513, + 489, + 520 + ], + "spans": [ + { + "bbox": [ + 452, + 513, + 489, + 520 + ], + "type": "text", + "content": "ansfer" + } + ] + } + ], + "index": 64, + "angle": 0, + "type": "image_caption" + } + ], + "index": 63 + }, + { + "type": "image", + "bbox": [ + 498, + 464, + 541, + 
523 + ], + "blocks": [ + { + "bbox": [ + 498, + 464, + 541, + 523 + ], + "lines": [ + { + "bbox": [ + 498, + 464, + 541, + 523 + ], + "spans": [ + { + "bbox": [ + 498, + 464, + 541, + 523 + ], + "type": "image", + "image_path": "47b133f962d1943ebccc7d7b95526dd22ba979ddb99f83ec1f369a7fafebc9be.jpg" + } + ] + } + ], + "index": 65, + "angle": 0, + "type": "image_body" + } + ], + "index": 65 + }, + { + "type": "image", + "bbox": [ + 70, + 529, + 158, + 568 + ], + "blocks": [ + { + "bbox": [ + 70, + 529, + 158, + 568 + ], + "lines": [ + { + "bbox": [ + 70, + 529, + 158, + 568 + ], + "spans": [ + { + "bbox": [ + 70, + 529, + 158, + 568 + ], + "type": "image", + "image_path": "ee5e5e4dc0f451c0d87afc70fdfb4e5b425f1d04355975da1e325a69e82d76cb.jpg" + } + ] + } + ], + "index": 66, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 569, + 124, + 578 + ], + "lines": [ + { + "bbox": [ + 77, + 569, + 124, + 578 + ], + "spans": [ + { + "bbox": [ + 77, + 569, + 124, + 578 + ], + "type": "text", + "content": "Editing (Add)" + } + ] + } + ], + "index": 67, + "angle": 0, + "type": "image_caption" + } + ], + "index": 66 + }, + { + "type": "image", + "bbox": [ + 160, + 529, + 242, + 579 + ], + "blocks": [ + { + "bbox": [ + 160, + 529, + 242, + 579 + ], + "lines": [ + { + "bbox": [ + 160, + 529, + 242, + 579 + ], + "spans": [ + { + "bbox": [ + 160, + 529, + 242, + 579 + ], + "type": "image", + "image_path": "1dc3854fd5ac3d6dcffbeb4a1d39085a938d5eb202d29657e49a2f6b7d256cda.jpg" + } + ] + } + ], + "index": 68, + "angle": 0, + "type": "image_body" + } + ], + "index": 68 + }, + { + "type": "image", + "bbox": [ + 244, + 529, + 317, + 579 + ], + "blocks": [ + { + "bbox": [ + 244, + 529, + 317, + 579 + ], + "lines": [ + { + "bbox": [ + 244, + 529, + 317, + 579 + ], + "spans": [ + { + "bbox": [ + 244, + 529, + 317, + 579 + ], + "type": "image", + "image_path": "1a72a8d71ec0685fd94de7f2a505d77e8c5208a02955aace9b945bd03d575d65.jpg" + } + ] + } + ], + "index": 69, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 69 + }, + { + "type": "image", + "bbox": [ + 317, + 529, + 391, + 579 + ], + "blocks": [ + { + "bbox": [ + 317, + 529, + 391, + 579 + ], + "lines": [ + { + "bbox": [ + 317, + 529, + 391, + 579 + ], + "spans": [ + { + "bbox": [ + 317, + 529, + 391, + 579 + ], + "type": "image", + "image_path": "66ed0ab3c6fbf09c3be50cfee5d84e76b0392f0983e9eb68ce2439a48844dc56.jpg" + } + ] + } + ], + "index": 70, + "angle": 0, + "type": "image_body" + } + ], + "index": 70 + }, + { + "type": "image", + "bbox": [ + 391, + 529, + 441, + 569 + ], + "blocks": [ + { + "bbox": [ + 391, + 529, + 441, + 569 + ], + "lines": [ + { + "bbox": [ + 391, + 529, + 441, + 569 + ], + "spans": [ + { + "bbox": [ + 391, + 529, + 441, + 569 + ], + "type": "image", + "image_path": "ef3a55e1c0e0d92f96cdbb5e7bf19885fc97fd1438490113606cf105ff38a722.jpg" + } + ] + } + ], + "index": 71, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 399, + 570, + 479, + 578 + ], + "lines": [ + { + "bbox": [ + 399, + 570, + 479, + 578 + ], + "spans": [ + { + "bbox": [ + 399, + 570, + 479, + 578 + ], + "type": "text", + "content": "Subject-driven Editing" + } + ] + } + ], + "index": 72, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 441, + 570, + 491, + 578 + ], + "lines": [ + { + "bbox": [ + 441, + 570, + 491, + 578 + ], + "spans": [ + { + "bbox": [ + 441, + 570, + 491, + 578 + ], + "type": "text", + "content": "even Editing" + } + ] + } + ], + "index": 74, + "angle": 0, + "type": "image_caption" + } + ], + "index": 71 + }, + { + "type": "image", + "bbox": [ + 441, + 529, + 490, + 569 + ], + "blocks": [ + { + "bbox": [ + 441, + 529, + 490, + 569 + ], + "lines": [ + { + "bbox": [ + 441, + 529, + 490, + 569 + ], + "spans": [ + { + "bbox": [ + 441, + 529, + 490, + 569 + ], + "type": "image", + "image_path": "ed0aac7d61ace1eba9a61c459cf3c93e45083cb784032d709343f6143259f33c.jpg" + } + ] + } + ], + "index": 73, + "angle": 0, + "type": "image_body" + } + ], + "index": 
73 + }, + { + "type": "image", + "bbox": [ + 492, + 529, + 541, + 579 + ], + "blocks": [ + { + "bbox": [ + 492, + 529, + 541, + 579 + ], + "lines": [ + { + "bbox": [ + 492, + 529, + 541, + 579 + ], + "spans": [ + { + "bbox": [ + 492, + 529, + 541, + 579 + ], + "type": "image", + "image_path": "7a1fb4d42aae0b909975ed02636653475ab979e238853e1d19e586ee9b214afa.jpg" + } + ] + } + ], + "index": 75, + "angle": 0, + "type": "image_body" + } + ], + "index": 75 + }, + { + "type": "image", + "bbox": [ + 70, + 585, + 127, + 642 + ], + "blocks": [ + { + "bbox": [ + 70, + 585, + 127, + 642 + ], + "lines": [ + { + "bbox": [ + 70, + 585, + 127, + 642 + ], + "spans": [ + { + "bbox": [ + 70, + 585, + 127, + 642 + ], + "type": "image", + "image_path": "29e6e033eba6f9c828268ab3603ad2247905ec18441924873e78e01090deabba.jpg" + } + ] + } + ], + "index": 76, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 649, + 555, + 683 + ], + "lines": [ + { + "bbox": [ + 55, + 649, + 555, + 683 + ], + "spans": [ + { + "bbox": [ + 55, + 649, + 555, + 683 + ], + "type": "text", + "content": "Figure 1. The top left illustrates our universal image generation framework based on visual in-context learning. Given one query of a specific task, the generative model learns the task by observing a few in-context examples presented as demonstrations. For each task, the generation result is indicated by a red box." 
+ } + ] + } + ], + "index": 87, + "angle": 0, + "type": "image_caption" + } + ], + "index": 76 + }, + { + "type": "image", + "bbox": [ + 129, + 587, + 181, + 617 + ], + "blocks": [ + { + "bbox": [ + 129, + 587, + 181, + 617 + ], + "lines": [ + { + "bbox": [ + 129, + 587, + 181, + 617 + ], + "spans": [ + { + "bbox": [ + 129, + 587, + 181, + 617 + ], + "type": "image", + "image_path": "ae71def7d7afd6993f212b48147ff76d77f6b67ba677957013ffa307143b088f.jpg" + } + ] + } + ], + "index": 77, + "angle": 0, + "type": "image_body" + } + ], + "index": 77 + }, + { + "type": "image", + "bbox": [ + 185, + 587, + 238, + 637 + ], + "blocks": [ + { + "bbox": [ + 185, + 587, + 238, + 637 + ], + "lines": [ + { + "bbox": [ + 185, + 587, + 238, + 637 + ], + "spans": [ + { + "bbox": [ + 185, + 587, + 238, + 637 + ], + "type": "image", + "image_path": "b9e915ffc563ac1292d609bb50db8a3d661d89781b64ad46406c4a4066ae6ec0.jpg" + } + ] + } + ], + "index": 78, + "angle": 0, + "type": "image_body" + } + ], + "index": 78 + }, + { + "type": "image", + "bbox": [ + 242, + 587, + 298, + 642 + ], + "blocks": [ + { + "bbox": [ + 242, + 587, + 298, + 642 + ], + "lines": [ + { + "bbox": [ + 242, + 587, + 298, + 642 + ], + "spans": [ + { + "bbox": [ + 242, + 587, + 298, + 642 + ], + "type": "image", + "image_path": "e99f16afe43814fda782234d7d661493d65d0f8193c42181a2fb8646da77dfbb.jpg" + } + ] + } + ], + "index": 79, + "angle": 0, + "type": "image_body" + } + ], + "index": 79 + }, + { + "type": "image", + "bbox": [ + 303, + 585, + 362, + 617 + ], + "blocks": [ + { + "bbox": [ + 303, + 585, + 362, + 617 + ], + "lines": [ + { + "bbox": [ + 303, + 585, + 362, + 617 + ], + "spans": [ + { + "bbox": [ + 303, + 585, + 362, + 617 + ], + "type": "image", + "image_path": "1263fd0d3b503ab31990285eaa05a8492e113ec5ee63b7f9025a558be400f16f.jpg" + } + ] + } + ], + "index": 80, + "angle": 0, + "type": "image_body" + } + ], + "index": 80 + }, + { + "type": "image", + "bbox": [ + 315, + 617, + 347, + 633 + ], + "blocks": [ + 
{ + "bbox": [ + 315, + 617, + 347, + 633 + ], + "lines": [ + { + "bbox": [ + 315, + 617, + 347, + 633 + ], + "spans": [ + { + "bbox": [ + 315, + 617, + 347, + 633 + ], + "type": "image", + "image_path": "7c4061b0dc13b470d7a84ab421c7b751f3730815bec422d66c175f2230d760b1.jpg" + } + ] + } + ], + "index": 81, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 633, + 356, + 640 + ], + "lines": [ + { + "bbox": [ + 315, + 633, + 356, + 640 + ], + "spans": [ + { + "bbox": [ + 315, + 633, + 356, + 640 + ], + "type": "text", + "content": "Multi-View" + } + ] + } + ], + "index": 82, + "angle": 0, + "type": "image_caption" + } + ], + "index": 81 + }, + { + "type": "image", + "bbox": [ + 377, + 587, + 417, + 639 + ], + "blocks": [ + { + "bbox": [ + 377, + 587, + 417, + 639 + ], + "lines": [ + { + "bbox": [ + 377, + 587, + 417, + 639 + ], + "spans": [ + { + "bbox": [ + 377, + 587, + 417, + 639 + ], + "type": "image", + "image_path": "1df33f9215c95c3263f0399bb5306ada5f1da2b9580d1f7258ea97c667a0082c.jpg" + } + ] + } + ], + "index": 83, + "angle": 0, + "type": "image_body" + } + ], + "index": 83 + }, + { + "type": "image", + "bbox": [ + 428, + 586, + 472, + 632 + ], + "blocks": [ + { + "bbox": [ + 428, + 586, + 472, + 632 + ], + "lines": [ + { + "bbox": [ + 428, + 586, + 472, + 632 + ], + "spans": [ + { + "bbox": [ + 428, + 586, + 472, + 632 + ], + "type": "image", + "image_path": "a0b9804799dd95f5f5a5f8a400d728989ad5e77cd0039295b88a70967f9e46bb.jpg" + } + ] + } + ], + "index": 84, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 632, + 472, + 639 + ], + "lines": [ + { + "bbox": [ + 430, + 632, + 472, + 639 + ], + "spans": [ + { + "bbox": [ + 430, + 632, + 472, + 639 + ], + "type": "text", + "content": "Multi-View" + } + ] + } + ], + "index": 85, + "angle": 0, + "type": "image_caption" + } + ], + "index": 84 + }, + { + "type": "image", + "bbox": [ + 499, + 587, + 541, + 635 + ], + "blocks": [ + { + "bbox": [ + 499, + 587, + 541, + 635 + ], + 
"lines": [ + { + "bbox": [ + 499, + 587, + 541, + 635 + ], + "spans": [ + { + "bbox": [ + 499, + 587, + 541, + 635 + ], + "type": "image", + "image_path": "8a4ca149908f1c3dc7ae4543913cbbac86677dfbc0b3e6595b8b3db7216b1fb3.jpg" + } + ] + } + ], + "index": 86, + "angle": 0, + "type": "image_body" + } + ], + "index": 86 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 219, + 35, + 574 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 219, + 35, + 574 + ], + "spans": [ + { + "bbox": [ + 14, + 219, + 35, + 574 + ], + "type": "text", + "content": "arXiv:2504.07960v3 [cs.CV] 14 Dec 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 88 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 95, + 108, + 129 + ], + "blocks": [ + { + "bbox": [ + 65, + 95, + 108, + 129 + ], + "lines": [ + { + "bbox": [ + 65, + 95, + 108, + 129 + ], + "spans": [ + { + "bbox": [ + 65, + 95, + 108, + 129 + ], + "type": "image", + "image_path": "df8c1d05aa538cfaab171fbf8a43d859274becad33105a818a882f24e87d6ac4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 129, + 107, + 137 + ], + "lines": [ + { + "bbox": [ + 66, + 129, + 107, + 137 + ], + "spans": [ + { + "bbox": [ + 66, + 129, + 107, + 137 + ], + "type": "text", + "content": "Visual Prompt" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 110, + 95, + 154, + 129 + ], + "blocks": [ + { + "bbox": [ + 82, + 84, + 179, + 93 + ], + "lines": [ + { + "bbox": [ + 82, + 84, + 179, + 93 + ], + "spans": [ + { + "bbox": [ + 82, + 84, + 179, + 93 + ], 
+ "type": "text", + "content": "Without In-context Example" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 110, + 95, + 154, + 129 + ], + "lines": [ + { + "bbox": [ + 110, + 95, + 154, + 129 + ], + "spans": [ + { + "bbox": [ + 110, + 95, + 154, + 129 + ], + "type": "image", + "image_path": "4c8e89482096035b9b67bf8d9f75fde004d8898eef458d3627c5b17cc693e0a4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 129, + 153, + 137 + ], + "lines": [ + { + "bbox": [ + 111, + 129, + 153, + 137 + ], + "spans": [ + { + "bbox": [ + 111, + 129, + 153, + 137 + ], + "type": "text", + "content": "Visual Prompt" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 156, + 95, + 199, + 129 + ], + "blocks": [ + { + "bbox": [ + 156, + 95, + 199, + 129 + ], + "lines": [ + { + "bbox": [ + 156, + 95, + 199, + 129 + ], + "spans": [ + { + "bbox": [ + 156, + 95, + 199, + 129 + ], + "type": "image", + "image_path": "a93592ea83277e4116662db6789d092b4506ba785c49b5ef69f3ece170981951.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 129, + 179, + 137 + ], + "lines": [ + { + "bbox": [ + 157, + 129, + 179, + 137 + ], + "spans": [ + { + "bbox": [ + 157, + 129, + 179, + 137 + ], + "type": "text", + "content": "Target" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 66, + 156, + 108, + 197 + ], + "blocks": [ + { + "bbox": [ + 85, + 146, + 176, + 155 + ], + "lines": [ + { + "bbox": [ + 85, + 146, + 176, + 155 + ], + "spans": [ + { + "bbox": [ + 85, + 146, + 176, + 155 + ], + "type": "text", + "content": "+One In-context Example" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 66, + 156, + 108, + 197 + ], + "lines": [ + { + "bbox": [ + 66, + 156, 
+ 108, + 197 + ], + "spans": [ + { + "bbox": [ + 66, + 156, + 108, + 197 + ], + "type": "image", + "image_path": "225435a613094edea23cdf30cd5aef8ff20c0846d6767e1c720739f28dcfdda9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 111, + 156, + 153, + 197 + ], + "blocks": [ + { + "bbox": [ + 111, + 156, + 153, + 197 + ], + "lines": [ + { + "bbox": [ + 111, + 156, + 153, + 197 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 153, + 197 + ], + "type": "image", + "image_path": "7ffa1029cfcefa94e3cf5bdaf17569e7ed77473937a42d10f5327d8ba5f3baa4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 156, + 156, + 198, + 197 + ], + "blocks": [ + { + "bbox": [ + 156, + 156, + 198, + 197 + ], + "lines": [ + { + "bbox": [ + 156, + 156, + 198, + 197 + ], + "spans": [ + { + "bbox": [ + 156, + 156, + 198, + 197 + ], + "type": "image", + "image_path": "2aae421c0d97f7367323bd96a3b60c8ad03dff85be0351db327a74916b1c7eb7.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 65, + 200, + 108, + 234 + ], + "blocks": [ + { + "bbox": [ + 65, + 200, + 108, + 234 + ], + "lines": [ + { + "bbox": [ + 65, + 200, + 108, + 234 + ], + "spans": [ + { + "bbox": [ + 65, + 200, + 108, + 234 + ], + "type": "image", + "image_path": "bbde4e344c92cdffbb5d6d627f513b6db53c242132d666b089c56062905f8f21.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 235, + 107, + 242 + ], + "lines": [ + { + "bbox": [ + 66, + 235, + 107, + 242 + ], + "spans": [ + { + "bbox": [ + 66, + 235, + 107, + 242 + ], + "type": "text", + "content": "Visual Prompt" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 268, + 553, + 305 + ], + "lines": [ + { + "bbox": [ + 55, + 268, + 553, 
+ 305 + ], + "spans": [ + { + "bbox": [ + 55, + 268, + 553, + 305 + ], + "type": "text", + "content": "face, to generate [IMAGE3] that faces the center of the lens. The last row is: making [IMAGE1] the standing woman the final row is: the woman's frontal face that faces the center of the lens. [IMAGE2] sit down and give the thumbs up. Figure 2. Unseen Tasks : Generalizing to tasks unseen during training via in-context learning. More in-context examples lead to more accurate results." + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 111, + 200, + 153, + 234 + ], + "blocks": [ + { + "bbox": [ + 111, + 200, + 153, + 234 + ], + "lines": [ + { + "bbox": [ + 111, + 200, + 153, + 234 + ], + "spans": [ + { + "bbox": [ + 111, + 200, + 153, + 234 + ], + "type": "image", + "image_path": "6e7c4b50a411b64ea2fa9b3901aa00c73ad2e08eb154636134429d2e7293b86d.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 235, + 153, + 242 + ], + "lines": [ + { + "bbox": [ + 111, + 235, + 153, + 242 + ], + "spans": [ + { + "bbox": [ + 111, + 235, + 153, + 242 + ], + "type": "text", + "content": "Visual Prompt" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 156, + 200, + 199, + 234 + ], + "blocks": [ + { + "bbox": [ + 156, + 200, + 199, + 234 + ], + "lines": [ + { + "bbox": [ + 156, + 200, + 199, + 234 + ], + "spans": [ + { + "bbox": [ + 156, + 200, + 199, + 234 + ], + "type": "image", + "image_path": "49e8c714009dcb15c30f842353e6790b503d26f066686ad9343c23d792f474af.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 235, + 179, + 241 + ], + "lines": [ + { + "bbox": [ + 157, + 235, + 179, + 241 + ], + "spans": [ + { + "bbox": [ + 157, + 235, + 179, + 241 + ], + "type": "text", + "content": "Target" + } + ] + } + ], + 
"index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 210, + 111, + 252, + 153 + ], + "blocks": [ + { + "bbox": [ + 227, + 93, + 323, + 102 + ], + "lines": [ + { + "bbox": [ + 227, + 93, + 323, + 102 + ], + "spans": [ + { + "bbox": [ + 227, + 93, + 323, + 102 + ], + "type": "text", + "content": "+ Two In-context Examples" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 210, + 111, + 252, + 153 + ], + "lines": [ + { + "bbox": [ + 210, + 111, + 252, + 153 + ], + "spans": [ + { + "bbox": [ + 210, + 111, + 252, + 153 + ], + "type": "image", + "image_path": "9b0e95ce6b5f659443d087b5d3ecac99c7d060cbfb474e9fdbe6bee5a539dfa4.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 255, + 111, + 296, + 153 + ], + "blocks": [ + { + "bbox": [ + 255, + 111, + 296, + 153 + ], + "lines": [ + { + "bbox": [ + 255, + 111, + 296, + 153 + ], + "spans": [ + { + "bbox": [ + 255, + 111, + 296, + 153 + ], + "type": "image", + "image_path": "1adeb98c09571df20505a5a4f0305250bc8edd084d989b33f63c66a87587c7b6.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 299, + 111, + 342, + 153 + ], + "blocks": [ + { + "bbox": [ + 299, + 111, + 342, + 153 + ], + "lines": [ + { + "bbox": [ + 299, + 111, + 342, + 153 + ], + "spans": [ + { + "bbox": [ + 299, + 111, + 342, + 153 + ], + "type": "image", + "image_path": "865ed759eb5d007006a5967c548a618725dba0bc159ec85228032aa3ce5813b0.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 209, + 156, + 252, + 197 + ], + "blocks": [ + { + "bbox": [ + 209, + 156, + 252, + 197 + ], + "lines": [ + { + "bbox": [ + 209, + 156, + 252, + 197 + ], + "spans": [ + { + "bbox": [ + 209, + 156, + 252, + 197 + ], + 
"type": "image", + "image_path": "590911237aefab4740b5c175aafe250be5092b518e279faa3523a69cb26b5770.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 255, + 156, + 296, + 197 + ], + "blocks": [ + { + "bbox": [ + 255, + 156, + 296, + 197 + ], + "lines": [ + { + "bbox": [ + 255, + 156, + 296, + 197 + ], + "spans": [ + { + "bbox": [ + 255, + 156, + 296, + 197 + ], + "type": "image", + "image_path": "2d0bebf1658a821fcc6e82cac71ba626d69b22bdfd41b0163b1654d23b35e58b.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 299, + 156, + 342, + 197 + ], + "blocks": [ + { + "bbox": [ + 299, + 156, + 342, + 197 + ], + "lines": [ + { + "bbox": [ + 299, + 156, + 342, + 197 + ], + "spans": [ + { + "bbox": [ + 299, + 156, + 342, + 197 + ], + "type": "image", + "image_path": "35101d5b91c55b650a4ca2e9d86bb2cb516b3928546e9cc30ced8b9e1acfd56c.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 209, + 200, + 252, + 234 + ], + "blocks": [ + { + "bbox": [ + 209, + 200, + 252, + 234 + ], + "lines": [ + { + "bbox": [ + 209, + 200, + 252, + 234 + ], + "spans": [ + { + "bbox": [ + 209, + 200, + 252, + 234 + ], + "type": "image", + "image_path": "1ced14dcdb0f7655cb23c43ba76afe931b2f069d0003e9406fbb4efecf6621b0.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 235, + 252, + 242 + ], + "lines": [ + { + "bbox": [ + 209, + 235, + 252, + 242 + ], + "spans": [ + { + "bbox": [ + 209, + 235, + 252, + 242 + ], + "type": "text", + "content": "Visual Prompt" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 255, + 200, + 296, + 234 + ], + "blocks": [ + { + "bbox": [ + 255, + 200, + 296, + 234 + ], + "lines": [ 
+ { + "bbox": [ + 255, + 200, + 296, + 234 + ], + "spans": [ + { + "bbox": [ + 255, + 200, + 296, + 234 + ], + "type": "image", + "image_path": "de40888ebddd978640967ce3e0a2098836df789420975c6582e6a62980f7cb49.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 255, + 235, + 296, + 242 + ], + "lines": [ + { + "bbox": [ + 255, + 235, + 296, + 242 + ], + "spans": [ + { + "bbox": [ + 255, + 235, + 296, + 242 + ], + "type": "text", + "content": "Visual Prompt" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 299, + 200, + 342, + 234 + ], + "blocks": [ + { + "bbox": [ + 299, + 200, + 342, + 234 + ], + "lines": [ + { + "bbox": [ + 299, + 200, + 342, + 234 + ], + "spans": [ + { + "bbox": [ + 299, + 200, + 342, + 234 + ], + "type": "image", + "image_path": "8cee65b550f76526a0ec36b3d610f2bca88bd275c26b1b73a5aa107579965a84.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 235, + 342, + 242 + ], + "lines": [ + { + "bbox": [ + 299, + 235, + 342, + 242 + ], + "spans": [ + { + "bbox": [ + 299, + 235, + 342, + 242 + ], + "type": "text", + "content": "Target" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 358, + 94, + 402, + 129 + ], + "blocks": [ + { + "bbox": [ + 353, + 84, + 451, + 93 + ], + "lines": [ + { + "bbox": [ + 353, + 84, + 451, + 93 + ], + "spans": [ + { + "bbox": [ + 353, + 84, + 451, + 93 + ], + "type": "text", + "content": "Without In-context Example" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 358, + 94, + 402, + 129 + ], + "lines": [ + { + "bbox": [ + 358, + 94, + 402, + 129 + ], + "spans": [ + { + "bbox": [ + 358, + 94, + 402, + 129 + ], + "type": "image", + "image_path": 
"87975a69a7cfa15925ae1c6c60a37382b5ca478a877f5b0170fd190aa93a84c6.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 129, + 400, + 137 + ], + "lines": [ + { + "bbox": [ + 359, + 129, + 400, + 137 + ], + "spans": [ + { + "bbox": [ + 359, + 129, + 400, + 137 + ], + "type": "text", + "content": "Visual Prompt" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 403, + 94, + 446, + 129 + ], + "blocks": [ + { + "bbox": [ + 403, + 94, + 446, + 129 + ], + "lines": [ + { + "bbox": [ + 403, + 94, + 446, + 129 + ], + "spans": [ + { + "bbox": [ + 403, + 94, + 446, + 129 + ], + "type": "image", + "image_path": "0011099c4f11710feccc82686f0023255f8adccc3a9a19fce321b9d65917a02a.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 129, + 425, + 137 + ], + "lines": [ + { + "bbox": [ + 405, + 129, + 425, + 137 + ], + "spans": [ + { + "bbox": [ + 405, + 129, + 425, + 137 + ], + "type": "text", + "content": "Target" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 455, + 93, + 547, + 102 + ], + "lines": [ + { + "bbox": [ + 455, + 93, + 547, + 102 + ], + "spans": [ + { + "bbox": [ + 455, + 93, + 547, + 102 + ], + "type": "text", + "content": "+ Two In-context Example" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 358, + 156, + 401, + 197 + ], + "blocks": [ + { + "bbox": [ + 354, + 146, + 447, + 155 + ], + "lines": [ + { + "bbox": [ + 354, + 146, + 447, + 155 + ], + "spans": [ + { + "bbox": [ + 354, + 146, + 447, + 155 + ], + "type": "text", + "content": "+ One In-context Example" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 358, + 156, + 401, + 197 + ], + "lines": [ + { + "bbox": [ + 358, + 156, + 401, + 197 + ], + 
"spans": [ + { + "bbox": [ + 358, + 156, + 401, + 197 + ], + "type": "image", + "image_path": "82142d68ddec7246b19ed5b8d35074aabe6ba13595fe20130c05a1bb064bb661.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 404, + 156, + 445, + 197 + ], + "blocks": [ + { + "bbox": [ + 404, + 156, + 445, + 197 + ], + "lines": [ + { + "bbox": [ + 404, + 156, + 445, + 197 + ], + "spans": [ + { + "bbox": [ + 404, + 156, + 445, + 197 + ], + "type": "image", + "image_path": "6bbda712bd7907bc495deaf542b4fbea3eb5d4414397d6b8f495eb112c6b6403.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 457, + 111, + 499, + 152 + ], + "blocks": [ + { + "bbox": [ + 457, + 111, + 499, + 152 + ], + "lines": [ + { + "bbox": [ + 457, + 111, + 499, + 152 + ], + "spans": [ + { + "bbox": [ + 457, + 111, + 499, + 152 + ], + "type": "image", + "image_path": "5f64dd49c40006e0644a0d6cb82c0a6b7c54d01053b61838f7c4a4e3da0c8672.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 503, + 111, + 544, + 152 + ], + "blocks": [ + { + "bbox": [ + 503, + 111, + 544, + 152 + ], + "lines": [ + { + "bbox": [ + 503, + 111, + 544, + 152 + ], + "spans": [ + { + "bbox": [ + 503, + 111, + 544, + 152 + ], + "type": "image", + "image_path": "4524ba90197d6faf3924f2fa848409b267d2ba9b74ae737bf113379c1d8f9fb6.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 358, + 199, + 401, + 234 + ], + "blocks": [ + { + "bbox": [ + 358, + 199, + 401, + 234 + ], + "lines": [ + { + "bbox": [ + 358, + 199, + 401, + 234 + ], + "spans": [ + { + "bbox": [ + 358, + 199, + 401, + 234 + ], + "type": "image", + "image_path": "0f2678607e6c892270344d986bc5156a63dca9f44988435e90886ea70b2ba0ed.jpg" + } + ] + } + ], + 
"index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 235, + 399, + 242 + ], + "lines": [ + { + "bbox": [ + 359, + 235, + 399, + 242 + ], + "spans": [ + { + "bbox": [ + 359, + 235, + 399, + 242 + ], + "type": "text", + "content": "Visual Prompt" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 353, + 246, + 549, + 283 + ], + "lines": [ + { + "bbox": [ + 353, + 246, + 549, + 283 + ], + "spans": [ + { + "bbox": [ + 353, + 246, + 549, + 283 + ], + "type": "text", + "content": "Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: making [IMAGE1] the standing woman [IMAGE2] sit down and give the thumbs up." + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 404, + 199, + 445, + 234 + ], + "blocks": [ + { + "bbox": [ + 404, + 199, + 445, + 234 + ], + "lines": [ + { + "bbox": [ + 404, + 199, + 445, + 234 + ], + "spans": [ + { + "bbox": [ + 404, + 199, + 445, + 234 + ], + "type": "image", + "image_path": "6ef7988092c3f099f7c744d99b5798d8e128ea6928adf080cd75abb614fc6081.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 235, + 425, + 242 + ], + "lines": [ + { + "bbox": [ + 406, + 235, + 425, + 242 + ], + "spans": [ + { + "bbox": [ + 406, + 235, + 425, + 242 + ], + "type": "text", + "content": "Target" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 458, + 155, + 500, + 197 + ], + "blocks": [ + { + "bbox": [ + 458, + 155, + 500, + 197 + ], + "lines": [ + { + "bbox": [ + 458, + 155, + 500, + 197 + ], + "spans": [ + { + "bbox": [ + 458, + 155, + 500, + 197 + ], + "type": "image", + "image_path": "62a0321b2defdca71a8991beaa7c0d9246db575e1936a7f07e2aa3ff4255ef5d.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 503, + 156, + 544, + 197 + ], + "blocks": [ + { + "bbox": [ + 503, + 156, + 544, + 197 + ], + "lines": [ + { + "bbox": [ + 503, + 156, + 544, + 197 + ], + "spans": [ + { + "bbox": [ + 503, + 156, + 544, + 197 + ], + "type": "image", + "image_path": "0a8c11abc1d3585d2309684f2424ffa5cfc40ddeb9c7ebcd550291520024d036.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 457, + 199, + 501, + 234 + ], + "blocks": [ + { + "bbox": [ + 457, + 199, + 501, + 234 + ], + "lines": [ + { + "bbox": [ + 457, + 199, + 501, + 234 + ], + "spans": [ + { + "bbox": [ + 457, + 199, + 501, + 234 + ], + "type": "image", + "image_path": "7725e502055d67d3754bb68f8865a468733e96410a1f407141a16aff82504871.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 503, + 199, + 545, + 234 + ], + "blocks": [ + { + "bbox": [ + 503, + 199, + 545, + 234 + ], + "lines": [ + { + "bbox": [ + 503, + 199, + 545, + 234 + ], + "spans": [ + { + "bbox": [ + 503, + 199, + 545, + 234 + ], + "type": "image", + "image_path": "1f186305b4bd17875edafaef1ce3e6238205a54a85f06cae50a1c0fc8d34d92f.jpg" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_body" + } + ], + "index": 48 + }, + { + "bbox": [ + 152, + 316, + 199, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 316, + 199, + 328 + ], + "spans": [ + { + "bbox": [ + 152, + 316, + 199, + 328 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 55, + 342, + 296, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 342, + 296, + 642 + ], + "spans": [ + { + "bbox": [ + 55, + 342, + 296, + 642 + ], + "type": "text", + "content": "Recent progress in diffusion models significantly advances various image 
generation tasks. However, the current mainstream approach remains focused on building task-specific models, which have limited efficiency when supporting a wide range of different needs. While universal models attempt to address this limitation, they face critical challenges, including generalizable task instruction, appropriate task distributions, and unified architectural design. To tackle these challenges, we propose VisualCloze, a universal image generation framework, which supports a wide range of in-domain tasks, generalization to unseen ones, unseen unification of multiple tasks, and reverse generation. Unlike existing methods that rely on language-based task instruction, leading to task ambiguity and weak generalization, we integrate visual in-context learning, allowing models to identify tasks from visual demonstrations. Meanwhile, the inherent sparsity of visual task distributions hampers the learning of transferable knowledge across tasks. To this end, we introduce Graph200K, a graph-structured dataset that establishes various interrelated tasks, enhancing task density and transferable knowledge. Furthermore, we uncover that our unified image generation formulation shared a consistent objective with image infilling, enabling us to leverage the strong generative priors of pre-trained infilling models without modifying the architectures." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 56, + 647, + 135, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 135, + 659 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 135, + 659 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 55, + 668, + 296, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 668, + 296, + 692 + ], + "spans": [ + { + "bbox": [ + 55, + 668, + 296, + 692 + ], + "type": "text", + "content": "Recent advancements in image generation, propelled by the progress of diffusion models [15, 33, 88], have led to a" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 313, + 318, + 555, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 318, + 555, + 461 + ], + "spans": [ + { + "bbox": [ + 313, + 318, + 555, + 461 + ], + "type": "text", + "content": "wide range of applications, including image editing [69], style transfer [64, 81], virtual try-on [11, 12], and personalized generation [38, 54], among others. However, these tasks typically require task-specific models, which limit efficiency and scalability for real-world applications. In recent years, there has been growing interest in universal generative models [27, 39, 44], aiming to handle diverse image generation tasks, even unseen ones, within a single unified framework. Despite significant progress, some critical issues remain to be addressed, such as (1) distinguishable and generalizable task instruction, (2) comprehensive task coverage during training, and (3) a unified model architecture." + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "type": "text", + "content": "An ideal task instruction is crucial for guiding the model to process the desired task effectively. Existing methods primarily rely on language instructions [27, 44] or task-specific tokens [39] to distinguish the task to be performed. 
However, the complexity of visual tasks and the inherent gap between vision and language modalities make it hard for the model to understand language-only task descriptions, which leads to task confusion [39] and hinders generalization on unseen tasks [35, 71]. Moreover, pre-learned task-specific tokens constrain the model only to handle seen tasks. In contrast, large language models (LLMs) have successfully achieved unified multi-task modeling, partially due to the rise of in-context learning [5], which allows models to adapt various tasks using only a few demonstrations. We aim to replicate the concept of in-context learning in the pure visual modality, where the model learns the desired task directly from a few visual examples as task demonstrations, as shown in Fig. 1 (Left Top). In this setting, in-context learning shows strong potential for universal image generation. We summarize four key findings: (1) it supports various in-domain tasks with reduced task ambiguity (Fig. 1);" + } + ] + } + ], + "index": 56 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 703, + 139, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 703, + 139, + 712 + ], + "spans": [ + { + "bbox": [ + 70, + 703, + 139, + 712 + ], + "type": "text", + "content": "* Equal contribution" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 151, + 703, + 225, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 703, + 225, + 712 + ], + "spans": [ + { + "bbox": [ + 151, + 703, + 225, + 712 + ], + "type": "text", + "content": "Corresponding author" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 59 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + 
}, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 72, + 260, + 235 + ], + "blocks": [ + { + "bbox": [ + 61, + 72, + 260, + 235 + ], + "lines": [ + { + "bbox": [ + 61, + 72, + 260, + 235 + ], + "spans": [ + { + "bbox": [ + 61, + 72, + 260, + 235 + ], + "type": "image", + "image_path": "4524b24592705bd243982985dfcf7b75d8aa81c12f80b430ba50f7f3f1856fe8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 236, + 555, + 270 + ], + "lines": [ + { + "bbox": [ + 55, + 236, + 555, + 270 + ], + "spans": [ + { + "bbox": [ + 55, + 236, + 555, + 270 + ], + "type": "text", + "content": "Figure 3. Unseen Tasks: Leveraging in-context learning to unify multiple seen tasks into a single-step unseen task. Left: Unifying the [Depth to Image] and [Relighting] task into a single [Depth to Images with Various Lighting] task. Right: Unifying multiple dense prediction tasks into a joint prediction task. Results without visual context can be found in the appendix." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 271, + 74, + 553, + 235 + ], + "blocks": [ + { + "bbox": [ + 271, + 74, + 553, + 235 + ], + "lines": [ + { + "bbox": [ + 271, + 74, + 553, + 235 + ], + "spans": [ + { + "bbox": [ + 271, + 74, + 553, + 235 + ], + "type": "image", + "image_path": "74cf25e75f12bfa369263343b948539488e43676d1cdc73d00248e8230e91f3b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 280, + 295, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 280, + 295, + 400 + ], + "spans": [ + { + "bbox": [ + 54, + 280, + 295, + 400 + ], + "type": "text", + "content": "(2) it generalizes to unseen tasks (Fig. 2, Fig. 8); (3) as an unseen strategy for task unification, it can integrate multiple sub-tasks into a single step and generate intermediate results (Fig. 
3); (4) it enables reverse generation, i.e., inferring a set of conditions from a given target (Fig. 9). While prior works [1, 3, 4, 43, 66, 71, 82] have also explored in-context learning in vision, they are largely constrained to specific domains (such as dense prediction or style transfer [67, 87]), or simplified generation settings involving only one condition and one target image [43, 60]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 401, + 295, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 401, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 56, + 401, + 295, + 640 + ], + "type": "text", + "content": "From the perspective of task distribution, visual tasks are inherently sparse compared to those in natural language processing because task-specific datasets [71, 85] for different tasks have minimal overlap [19, 32, 79]. Such sparse task learning isolates the knowledge of each task and limits the model from learning shared features across tasks. Moreover, the weak correlations between tasks hinder knowledge transfer and adaptability to new tasks. However, existing works in multi-task learning [10, 16, 31, 53] have verified the benefits of overlapping knowledge across related tasks. To alleviate the sparsity of visual tasks, we introduce a graph-structured dataset, Graph200K, where each image is associated with annotations spanning five metatasks, i.e., conditional generation [80], IP preservation [76], style transfer [81], image editing [69], and restoration [77]. By combining different conditions, we train the model with a variety of tasks that overlap with each other. Given this highly overlapping and compact task space, our dataset significantly increases task density, allowing the model to learn shared and transferable knowledge more effectively." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 642, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 295, + 714 + ], + "type": "text", + "content": "For the architecture design, it is essential to 1) accommodate flexible task formats [27, 35, 71], ensuring seamless in-context learning, and 2) remain compatible with state-of-the-art models [33, 88] to fully leverage their strong generative priors. In this work, we find that the state-of-the-art image infilling model [33] has a consistent objective with our" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 281, + 555, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 555, + 364 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 555, + 364 + ], + "type": "text", + "content": "in-context learning based universal generative formulation. Specifically, we concatenate all input and output images together, where the objective of a task is to fill the output area. This alignment enables us to build our model upon advanced general-purpose infilling models without additional modifications, achieving powerful universal generation capabilities with minimal data and training costs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 365, + 556, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 365, + 556, + 508 + ], + "spans": [ + { + "bbox": [ + 313, + 365, + 556, + 508 + ], + "type": "text", + "content": "In this work, we propose a universal image generation framework, VisualCloze, which fine-tunes FLUX.1-Filldev [33] with interrelated tasks sampled from Graph200K to learn transferable knowledge and support visual in-context learning. 
As the number of in-context examples increases, we observe enhanced performances and reduced task confusion, enabling the model to support a broad spectrum of in-domain tasks, including conditional generation, image restoration, editing, style transfer, IP-preservation, and their combinations. On unseen tasks, the model also shows a certain degree of generalization ability, as shown in Fig. 2. In summary, our main contributions are as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 509, + 553, + 640 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 313, + 509, + 553, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 509, + 553, + 544 + ], + "spans": [ + { + "bbox": [ + 313, + 509, + 553, + 544 + ], + "type": "text", + "content": "- We propose an in-context learning based universal image generation framework that supports a wide range of indomain tasks and exhibits generalization to unseen ones." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 544, + 553, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 544, + 553, + 591 + ], + "spans": [ + { + "bbox": [ + 313, + 544, + 553, + 591 + ], + "type": "text", + "content": "- We design a graph-structured dataset, Graph200K, which constructs a compact task space, enabling flexible online task sampling and promoting the models to learn shared and transferable knowledge across tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 592, + 553, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 592, + 553, + 640 + ], + "spans": [ + { + "bbox": [ + 313, + 592, + 553, + 640 + ], + "type": "text", + "content": "- Our unified image generation formulation shares a consistent objective with the state-of-the-art infilling model, enabling exceptional performance through minimal tuning without modifying the structure." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 651, + 400, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 651, + 400, + 663 + ], + "spans": [ + { + "bbox": [ + 314, + 651, + 400, + 663 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 671, + 422, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 671, + 422, + 684 + ], + "spans": [ + { + "bbox": [ + 314, + 671, + 422, + 684 + ], + "type": "text", + "content": "2.1. Image Generation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": "Recent advances in text-to-image generation have achieved remarkable performance, largely driven by the development" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 336 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 336 + ], + "type": "text", + "content": "of autoregressive models [41, 58, 78] and diffusion models [2, 13, 15, 18, 24, 40, 42, 48, 51]. Among these, rectified flow transformers [15, 17, 33, 88] have shown great training efficiency and overall performance. 
Building on these foundational models, diverse applications have emerged, such as conditional generation [80], style transfer [64], and personalized generation [38]. More recently, universal models that address various tasks [35, 44, 83] have been explored. For example, unified models like OmniGen [71] leverage large vision language models to consolidate multiple tasks into a single framework. Similarly, UniReal [9] unifies image generation tasks as discontinuous video generation. However, they still face issues such as over-reliance on language instructions, isolation and sparsity of visual tasks, and architecture design accommodating flexible task formats. To address these issues, we propose a universal image generation framework that unifies generation tasks as image infilling. Through visual in-context learning and our Graph200K dataset that constructs a denser task space to learn transferable knowledge, our method alleviates ambiguity to support a diverse set of in-domain tasks and generalizes to tasks unseen during training." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 365, + 205, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 365, + 205, + 379 + ], + "spans": [ + { + "bbox": [ + 55, + 365, + 205, + 379 + ], + "type": "text", + "content": "2.2. Visual In-context Learning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 391, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 391, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 391, + 296, + 714 + ], + "type": "text", + "content": "Along with the emergence of large language models, such as GPT-3 [5], in-context learning [14] has been an effective approach to allow the language model to understand and perform complex tasks given a few demonstrations. Early works [21, 22] in vision modality propose image analogies to create an image filter from examples automatically. 
In recent years, leveraging inpainting model [3, 4, 82], masked image modeling [43, 66, 67], or vision-language model [1, 86], visual in-context learning is proposed to handle more tasks. However, they mainly focus on dense prediction [55, 59, 87] or visual understanding [63]. OmniGen [71] also leverages in-context learning to generalize to unseen domains, e.g., segmenting unseen concepts when the model has learned the segmentation task during training. However, it mainly focuses on simple tasks of dense prediction, and the gap between the unseen and training domains is still limited. Some recent works [34, 43, 60, 68] extend visual in-context learning to image generation, but they are still limited by simple tasks such as conditional generation and dense prediction. Moreover, the sparsity of visual tasks makes it difficult for models to learn transferable and overlapping knowledge across tasks, limiting the generation ability of in-context learning. In contrast, we introduce a graph-structured dataset that supports interrelated tasks and thus constructs a more dense task space, promoting the model to learn shared and transferable knowledge and enhance its adaptability." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 317, + 72, + 556, + 322 + ], + "blocks": [ + { + "bbox": [ + 317, + 72, + 556, + 322 + ], + "lines": [ + { + "bbox": [ + 317, + 72, + 556, + 322 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 556, + 322 + ], + "type": "image", + "image_path": "1732fc2ed7efbba343de0423288c803466a9f0e9b719b20d684c15538ef8510e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 332, + 555, + 389 + ], + "lines": [ + { + "bbox": [ + 313, + 332, + 555, + 389 + ], + "spans": [ + { + "bbox": [ + 313, + 332, + 555, + 389 + ], + "type": "text", + "content": "Figure 4. Illustration of the proposed Graph200K dataset. 
Each image is annotated for five meta-tasks, i.e., conditional generation, image restoration, image editing, IP preservation, and style transfer. Using these tasks, we can combine a wide range of complex tasks, such as the bottom of the figure." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 314, + 411, + 369, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 411, + 369, + 423 + ], + "spans": [ + { + "bbox": [ + 314, + 411, + 369, + 423 + ], + "type": "text", + "content": "3. Dataset" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 432, + 555, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 432, + 555, + 635 + ], + "spans": [ + { + "bbox": [ + 313, + 432, + 555, + 635 + ], + "type": "text", + "content": "Recent works [26, 44, 71] have made great progress in unified image generation. However, their generalization to unseen tasks remains highly limited. We partially attribute this issue to the sparsity and isolation of visual tasks, hindering the model from learning shared features across tasks and handling unseen ones. Moreover, weak correlations between tasks further hinder knowledge transfer, restricting the adaptability of models. Therefore, increasing task density or strengthening task inter-relations helps improve the generalization ability of models via a compact task distribution. In this paper, we take the Subject200K [61] dataset as a starting point and construct our Graph200K dataset by augmenting each image with 49 types of annotations spanning five meta-tasks. This enriched annotation space enables flexible construction of a wide range of related tasks by sampling and combining arbitrary subsets of annotations across different meta-tasks, as illustrated in Fig. 4." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 647, + 515, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 647, + 515, + 659 + ], + "spans": [ + { + "bbox": [ + 313, + 647, + 515, + 659 + ], + "type": "text", + "content": "3.1. Graph-Structured Multi-Task Dataset" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "type": "text", + "content": "In natural language processing, tasks overlap significantly, facilitating strong cross-task learning ability. In contrast, visual tasks are inherently distinct, posing challenges for vision models to achieve similar generalization ability via" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 178 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 178 + ], + "type": "text", + "content": "instruction tuning. To ease this issue, we introduce a Graph-Structured Multi-Task Dataset. As illustrated in Fig. 4 (a), given a text-to-image dataset, each image is treated as the central node of a graph, around which diverse task annotations are constructed, including those for various spatial conditions, degradations, image editing results, reference image for IP-preservation, and style transfer with various reference styles. The construction process for each task pair is detailed in the next section." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 182, + 298, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 182, + 298, + 373 + ], + "spans": [ + { + "bbox": [ + 56, + 182, + 298, + 373 + ], + "type": "text", + "content": "As shown in Fig. 4, each task annotation forms a bidirectional edge with the image. Thus, the graph is strongly connected, which means that for any two nodes, bidirectional paths exist between them. In other words, a generation task can be formulated as a path within the graph. The nodes along a path (except the end node) serve as condition images, which is analogous to the question in instruction fine-tuning, while the target image (the end node) plays the role of the answer. Specifically, there are 49 types of nodes in our Graph200K, and we sample up to 134 highly overlapping tasks, making the model learn more compact and shared representations across tasks. Moreover, it enriches the diversity and flexibility of our instruction fine-tuning data. For example, the path reference " + }, + { + "bbox": [ + 56, + 182, + 298, + 373 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 56, + 182, + 298, + 373 + ], + "type": "text", + "content": " editing " + }, + { + "bbox": [ + 56, + 182, + 298, + 373 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 56, + 182, + 298, + 373 + ], + "type": "text", + "content": " image corresponds to the task of image editing with reference, as shown in Fig. 4 bottom." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 384, + 178, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 384, + 178, + 396 + ], + "spans": [ + { + "bbox": [ + 55, + 384, + 178, + 396 + ], + "type": "text", + "content": "3.2. 
Dataset Construction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 402, + 295, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 402, + 295, + 462 + ], + "spans": [ + { + "bbox": [ + 55, + 402, + 295, + 462 + ], + "type": "text", + "content": "For convenience, we inherit subject-driven data from the Subjects200K [61]. Additionally, 32 different degradations are applied online to the images to acquire restoration data. We summarize the data construction methods in this section for the remaining three tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 474, + 295, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 474, + 295, + 629 + ], + "spans": [ + { + "bbox": [ + 55, + 474, + 295, + 629 + ], + "type": "text", + "content": "Conditional generation. Each image is paired with 12 distinct conditions generated by specialized models, including canny edges [6], HED edges [72], Hough lines [20], semantic segmentation maps [37], depth maps [74], shape normal maps [73], and human keypoints [7], following ControlNet [80]. This work extends the conditions by incorporating SAM2 [50] masks, foreground segmentation, and open-world boxes and masks. The foreground segmentation, derived from the RMBG [84], supports diverse tasks such as inpainting and foreground extraction. Open-world bounding boxes are generated through the grounding caption capability of Qwen2-VL [65], which are processed using SAM2 [50] to produce corresponding masks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 641, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 295, + 714 + ], + "type": "text", + "content": "Style transfer. We transfer the style of images according to reference in both semantic-variant and semantic-invariant settings. 
Specifically, the semantic-invariant transfer adopts InstantStyle [64] to preserve the semantic content, while the semantic-variant transfer relies on FLUX.1-Redux-dev [33], using the style embeddings and depth as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 555, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 109 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 109 + ], + "type": "text", + "content": "conditions. For each image, we randomly generate five stylized versions. Mixing the two tasks pushes the model to follow the in-context examples better to avoid ambiguity." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 118, + 555, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 118, + 555, + 275 + ], + "spans": [ + { + "bbox": [ + 313, + 118, + 555, + 275 + ], + "type": "text", + "content": "Image editing. We design two types of editing tasks, including background-variant and background-invariant editing. The background-invariant editing begins with localizing the subjects. Then, we leverage a large vision-language model, Qwen2-VL [65], to modify the image caption with a new object that replaces the original subject. The image, with the subject masked, is subsequently processed by the FLUX.1-Fill-dev [33] inpainting model to integrate the alternative object into the masked region. The above operation is repeated five times to enrich the dataset. For background-variant editing, the difference lies in the last step, which utilizes FLUX.1-Redux-dev [33] with depth as the condition and the modified caption as the text prompt." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 281, + 391, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 391, + 294 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 391, + 294 + ], + "type": "text", + "content": "3.3. 
Other Data" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 299, + 556, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 299, + 556, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 299, + 556, + 443 + ], + "type": "text", + "content": "To further expand the range of tasks and enhance the generalization ability of models, we incorporate several open-source datasets during training, including VITON-HD [11] for virtual try-on and PhotoDoodle [28] for artistic image editing. For image editing tasks, we also extend the dataset with OmniEdit [69]. Specifically, two sub-tasks, i.e., object addition and removal, are used for training. The other editing tasks, such as attribute modification and environment change, are treated as unseen tasks to assess the generalization ability of the trained model. Furthermore, we leverage a portion of high-quality internal data, covering tasks of the drawing process [62] and multi-view generation [29]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 453, + 371, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 453, + 371, + 466 + ], + "spans": [ + { + "bbox": [ + 313, + 453, + 371, + 466 + ], + "type": "text", + "content": "4. Method" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 473, + 556, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 473, + 556, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 473, + 556, + 605 + ], + "type": "text", + "content": "This paper identifies the core challenges in building a universal image generation model, including the need for a clearly defined and generalizable task formulation, visual task sparsity, and the lack of a unified framework for multi-task learning. In the previous section, we addressed the issue of task sparsity by constructing the compact Graph200K dataset. Sec. 
4.1 introduces visual in-context learning as the ideal paradigm for universal task formulation. Afterward, Sec. 4.2 considers the image infilling model a unified multi-task framework, achieving strong generalization capabilities with minimal cost." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 613, + 464, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 613, + 464, + 625 + ], + "spans": [ + { + "bbox": [ + 313, + 613, + 464, + 625 + ], + "type": "text", + "content": "4.1. Visual In-context Learning" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "type": "text", + "content": "Language instructions are usually used to specify the generation definition to handle multiple visual generation tasks with a single generative model. However, due to the gap between vision and language, the text comprehension ability of image generation models remains limited. This issue leads to task confusion [39] in existing universal generative models and weak generalization to unseen tasks. 
Inspired" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 77, + 296, + 203 + ], + "blocks": [ + { + "bbox": [ + 63, + 77, + 296, + 203 + ], + "lines": [ + { + "bbox": [ + 63, + 77, + 296, + 203 + ], + "spans": [ + { + "bbox": [ + 63, + 77, + 296, + 203 + ], + "type": "image", + "image_path": "1358ebd9de822d6bba28037c85b8d86380df5a48d034a0f61ea62114365d94fe.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 215, + 295, + 261 + ], + "lines": [ + { + "bbox": [ + 55, + 215, + 295, + 261 + ], + "spans": [ + { + "bbox": [ + 55, + 215, + 295, + 261 + ], + "type": "text", + "content": "Figure 5. Concatenating images when applying position embeddings. The " + }, + { + "bbox": [ + 55, + 215, + 295, + 261 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 215, + 295, + 261 + ], + "type": "text", + "content": " images within " + }, + { + "bbox": [ + 55, + 215, + 295, + 261 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 215, + 295, + 261 + ], + "type": "text", + "content": " in-context examples and the query are first concatenated horizontally. Then, these concatenated rows are concatenated temporally to handle mismatched aspect ratios." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 281, + 295, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 281, + 295, + 329 + ], + "spans": [ + { + "bbox": [ + 55, + 281, + 295, + 329 + ], + "type": "text", + "content": "by the success of few-shot learning on large language models [5], we recognize that visual context may serve as a more friendly task instruction for visual generative models, given their superior visual understanding capabilities." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "spans": [ + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "text", + "content": "Therefore, in this paper, we re-propose visual in-context learning to build a universal and generalizable image generation system. For the sake of description, here we assume the image input-output of arbitrary conditional generation task as a query consisting of " + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "inline_equation", + "content": "L - 1" + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "text", + "content": " condition images and a blank target " + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "text", + "content": " to be completed by the model, i.e., " + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "inline_equation", + "content": "X = \\mathrm{concat}(\\{x_1,\\dots ,x_{L - 1},\\emptyset \\})" + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "text", + "content": ". In Sec. 
5.1, we demonstrate that our method can be extended to more general scenarios, where it can generate images at arbitrary positions and in any quantity rather than just the single image at the end of the query. During training, we randomly provide up to " + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "text", + "content": " in-context examples, each containing " + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 329, + 296, + 556 + ], + "type": "text", + "content": " images as the query. This strategy ensures the generalization ability of models across different numbers of in-context examples. In our experiments, we show that providing in-context examples as task demonstrations not only helps alleviate task confusion and boost model performance across in-domain tasks [39], but also enhances the generalization ability on unseen tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 564, + 223, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 564, + 223, + 576 + ], + "spans": [ + { + "bbox": [ + 55, + 564, + 223, + 576 + ], + "type": "text", + "content": "4.2. Unified Multi-task Framework" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "text", + "content": "Unlike previous visual in-context learning methods that primarily focus on scenarios with a single image condition and a single context [43, 60], in this work, we aim to construct a unified framework capable of handling varying numbers of conditions and contexts, allowing for flexible adaptation to diverse tasks. 
For ease of description, we first assume all images processed by the model share the same size, " + }, + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "inline_equation", + "content": "W \\times H" + }, + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "text", + "content": ", and we extend to the scenario with mismatched aspect ratios at the end of this section. In this way, given " + }, + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "text", + "content": " in-context examples and the query, each containing " + }, + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "text", + "content": " images, all images can be concatenated into a complete grid-Layout image" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 553, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 144 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 144 + ], + "type": "text", + "content": "with a size of " + }, + { + "bbox": [ + 313, + 72, + 553, + 144 + ], + "type": "inline_equation", + "content": "(L\\times W,(C + 1)\\times H)" + }, + { + "bbox": [ + 313, + 72, + 553, + 144 + ], + "type": "text", + "content": ". Then, the model can complete a task by infilling the target grids based on the surrounding context, akin to solving visual cloze puzzles. Therefore, we build our unified framework, VisualCloze, based on the general image infilling architecture capable of handling multiple resolutions." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 144, + 553, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 144, + 553, + 168 + ], + "spans": [ + { + "bbox": [ + 313, + 144, + 553, + 168 + ], + "type": "text", + "content": "Consistent with common diffusion-based infilling model designs, our model can be formulated as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 393, + 173, + 553, + 188 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 393, + 173, + 553, + 188 + ], + "spans": [ + { + "bbox": [ + 393, + 173, + 553, + 188 + ], + "type": "interline_equation", + "content": "\\hat {X} = f (X \\mid T, M), \\tag {1}", + "image_path": "986db1fa825df456e31cd4bee2b6f5fb6fccf2702cc504818830a6dd0e420542.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "spans": [ + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "text", + "content": " is the concatenated image, with the last grid left blank, " + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "text", + "content": " is the language instruction, " + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "text", + "content": " is the mask condition, and " + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "inline_equation", + "content": "\\hat{X}" + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "text", + "content": " represents the inflated result. 
The mask " + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "text", + "content": " is a binary matrix with the size of " + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "inline_equation", + "content": "(H \\times (C + 1), W \\times L)" + }, + { + "bbox": [ + 313, + 193, + 553, + 242 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 325, + 247, + 553, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 247, + 553, + 291 + ], + "spans": [ + { + "bbox": [ + 325, + 247, + 553, + 291 + ], + "type": "interline_equation", + "content": "M (i, j) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} i \\in [ H \\times (C - 1), H \\times C) \\\\ & \\text {a n d} j \\in [ W \\times (L - 1), W \\times L), \\\\ 0 & \\text {o t h e r w i s e}, \\end{array} \\right. \\tag {2}", + "image_path": "401a1f0cced2afe8598f4ec09f75d4b47f9f32f86402e3842fb51da4a1e0d20f.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 297, + 555, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 297, + 555, + 381 + ], + "spans": [ + { + "bbox": [ + 313, + 297, + 555, + 381 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 297, + 555, + 381 + ], + "type": "inline_equation", + "content": "M(i,j) = 1" + }, + { + "bbox": [ + 313, + 297, + 555, + 381 + ], + "type": "text", + "content": " indicates that the pixel will be masked and generated by the infilling model. Equ. (2) masks the region in the last row and column, i.e., the target image. 
During training, we also randomly mask one of the first " + }, + { + "bbox": [ + 313, + 297, + 555, + 381 + ], + "type": "inline_equation", + "content": "L - 1" + }, + { + "bbox": [ + 313, + 297, + 555, + 381 + ], + "type": "text", + "content": " grids with a probability of 0.5, promoting reverse generation shown in Sec. 5.1. For the inference stage, we can crop " + }, + { + "bbox": [ + 313, + 297, + 555, + 381 + ], + "type": "inline_equation", + "content": "\\hat{X}" + }, + { + "bbox": [ + 313, + 297, + 555, + 381 + ], + "type": "text", + "content": " to obtain the target image easily." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 392, + 555, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 392, + 555, + 536 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 555, + 536 + ], + "type": "text", + "content": "Aligned optimization objective. A key benefit of this design is that our VisualCloze formulation shares a highly consistent objective with general image infilling models without architectural modifications or explicit input conditions. This consistency allows us to directly fine-tune advanced image infilling models using the newly constructed dataset while maximizing the utilization of the prior knowledge of foundation models. In contrast, existing task-specific models often require introducing additional learnable modules [38, 69] or adapting to extra condition inputs [61], which may compromise the native capabilities of the model." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "text", + "content": "Language instructions. 
Note that the design of language instruction is also necessary for VisualCloze because it is responsible for defining the grid image layout, describing the caption of the image to be generated, and specifying the task intent when in-context examples are unavailable. In our unified framework, the instruction consists of three parts: (1) layout instruction, which describes the " + }, + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "inline_equation", + "content": "(C + 1)\\times W" + }, + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "text", + "content": " layout of the grid image; (2) task instruction, which specifies the task type; and (3) content instruction, which describes the content of the target image. The details about the instructions are available in Appendix A. By restructuring the three components " + }, + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 546, + 555, + 715 + ], + "type": "text", + "content": " in Equ. (1), we achieve a unified multi-task framework for image generation with the general image infilling paradigm and support in-context learning." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 193 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 193 + ], + "type": "text", + "content": "Positional embedding. In the preceding section, all images are concatenated into a grid layout image and we can apply positional embedding (i.e., RoPE [57]) on this large image. However, a potential limitation lies in composing a grid image from in-context examples with varying aspect ratios. To overcome this issue, we leverage the 3D-RoPE in Flux.1-Fill-dev to concatenate the query and in-context examples along the temporal dimension, as shown in Fig. 5, effectively overcoming this issue without introducing any noticeable performance degradation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 199, + 189, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 199, + 189, + 213 + ], + "spans": [ + { + "bbox": [ + 55, + 199, + 189, + 213 + ], + "type": "text", + "content": "4.3. Implementation Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "spans": [ + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "text", + "content": "We use FLUX.1-Fill-dev [33] as our foundation model, considering its outstanding performance among open-source image infilling models. 
In this work, LoRA [25] is chosen to fine-tune the model instead of fully fine-tuning it to reduce training costs and preserve the capabilities of the foundation model. The resulting LoRA can also be fused with other LoRAs in the community, enabling more widespread applications. Specifically, we set the rank of LoRA as 256. The model is tuned for 20,000 iterations with an accumulated batch size of 64 on " + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "inline_equation", + "content": "8 \\times \\mathrm{A}100" + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "text", + "content": " GPUs. We employ the AdamW optimizer with a learning rate of " + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "inline_equation", + "content": "1e^{-4}" + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "text", + "content": ". Following FLUX.1-Fill-dev, we incorporate the lognorm noise strategy with dynamic time shifting. During training, the number of in-context examples is set up to 2 (i.e., " + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "text", + "content": " as defined in Sec. 4.2), while " + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "text", + "content": ", the number of images involved in a task, varies between 2 and 4 in the Graph200K dataset. During inference, the number of in-context examples can be generalized to a larger number. 
To balance computational efficiency, each image is resized to the area of " + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "inline_equation", + "content": "384 \\times 384" + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 217, + 296, + 482 + ], + "type": "text", + "content": " before concatenating them into a grid layout. High-resolution outputs can be obtained in practical applications through simple post-up-scaling techniques [45]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 491, + 137, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 491, + 137, + 504 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 137, + 504 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 510, + 282, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 510, + 282, + 525 + ], + "spans": [ + { + "bbox": [ + 55, + 510, + 282, + 525 + ], + "type": "text", + "content": "5.1. Qualitative Analysis of In-context Learning" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 528, + 296, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 528, + 296, + 588 + ], + "spans": [ + { + "bbox": [ + 55, + 528, + 296, + 588 + ], + "type": "text", + "content": "This section presents a series of experiments demonstrating the effectiveness of in-context learning across different tasks, especially those unseen during training. Based on our extensive experiments, we summarize four key findings that highlight the role of in-context learning." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 72, + 597, + 201, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 597, + 201, + 609 + ], + "spans": [ + { + "bbox": [ + 72, + 597, + 201, + 609 + ], + "type": "text", + "content": "In-Context Learning Findings 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 618, + 280, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 618, + 280, + 641 + ], + "spans": [ + { + "bbox": [ + 70, + 618, + 280, + 641 + ], + "type": "text", + "content": "In-context learning can mitigate task confusion for seen tasks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "type": "text", + "content": "Task ambiguity on seen tasks. The model occasionally experiences task confusion, failing to interpret the intended objective accurately, especially on dense prediction tasks. 
In-context learning effectively alleviates this issue" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 315, + 68, + 554, + 114 + ], + "blocks": [ + { + "bbox": [ + 315, + 68, + 554, + 114 + ], + "lines": [ + { + "bbox": [ + 315, + 68, + 554, + 114 + ], + "spans": [ + { + "bbox": [ + 315, + 68, + 554, + 114 + ], + "type": "image", + "image_path": "8f626ef355e0acb77913acad04167e89c0c0c4c5f84de1f1c391827b8f9846db.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 415, + 116, + 487, + 127 + ], + "lines": [ + { + "bbox": [ + 415, + 116, + 487, + 127 + ], + "spans": [ + { + "bbox": [ + 415, + 116, + 487, + 127 + ], + "type": "text", + "content": "(a) Image to Pose" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 315, + 128, + 553, + 173 + ], + "blocks": [ + { + "bbox": [ + 315, + 128, + 553, + 173 + ], + "lines": [ + { + "bbox": [ + 315, + 128, + 553, + 173 + ], + "spans": [ + { + "bbox": [ + 315, + 128, + 553, + 173 + ], + "type": "image", + "image_path": "89478554e2624c03002c38dc0b0b3797005ab2fb58fff9224ad2b02c4a50e563.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 315, + 185, + 553, + 231 + ], + "blocks": [ + { + "bbox": [ + 413, + 174, + 490, + 185 + ], + "lines": [ + { + "bbox": [ + 413, + 174, + 490, + 185 + ], + "spans": [ + { + "bbox": [ + 413, + 174, + 490, + 185 + ], + "type": "text", + "content": "(b) Image to Depth" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 315, + 185, + 553, + 231 + ], + "lines": [ + { + "bbox": [ + 315, + 185, + 553, + 231 + ], + "spans": [ + { + "bbox": [ + 315, + 185, + 553, + 231 + ], + "type": "image", + "image_path": "1ee7c7cb4e3fa3c1f80c8307b2f125ca56115352f10a661f2a448b98d7733511.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + 
"type": "image_body" + }, + { + "bbox": [ + 415, + 231, + 488, + 241 + ], + "lines": [ + { + "bbox": [ + 415, + 231, + 488, + 241 + ], + "spans": [ + { + "bbox": [ + 415, + 231, + 488, + 241 + ], + "type": "text", + "content": "(c) Image to Edge" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 315, + 241, + 553, + 285 + ], + "blocks": [ + { + "bbox": [ + 315, + 241, + 553, + 285 + ], + "lines": [ + { + "bbox": [ + 315, + 241, + 553, + 285 + ], + "spans": [ + { + "bbox": [ + 315, + 241, + 553, + 285 + ], + "type": "image", + "image_path": "799d110235e6099c3b897aad35734e877d65549aefaf1c26cba0f9dbc70c90cf.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 408, + 287, + 492, + 298 + ], + "lines": [ + { + "bbox": [ + 408, + 287, + 492, + 298 + ], + "spans": [ + { + "bbox": [ + 408, + 287, + 492, + 298 + ], + "type": "text", + "content": "(d) Normal to Image" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 313, + 312, + 555, + 335 + ], + "lines": [ + { + "bbox": [ + 313, + 312, + 555, + 335 + ], + "spans": [ + { + "bbox": [ + 313, + 312, + 555, + 335 + ], + "type": "text", + "content": "Figure 6. In-context learning mitigates the task ambiguity in seen tasks. We show three results using different initial noises." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 357, + 555, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 357, + 555, + 513 + ], + "spans": [ + { + "bbox": [ + 313, + 357, + 555, + 513 + ], + "type": "text", + "content": "by providing task-specific demonstrations. For example, in Fig. 
6 (a) and (c), the model may produce noisy results without in-context examples in pose estimation and edge detection, while increasing the number of in-context examples enhances the performance and stability. In depth estimation shown in Fig. 6 (b), in-context examples also improve the accuracy when the model originally makes inaccurate estimates, especially in distant areas. Additionally, in some tasks like conditional generation, we note that the model can generate satisfactory results stably even without in-context examples, as shown in Fig. 6 (d). However, the quantitative comparison in Tab. 1 still shows that using in-context learning can further improve the accuracy of task completion." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 331, + 522, + 460, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 522, + 460, + 534 + ], + "spans": [ + { + "bbox": [ + 331, + 522, + 460, + 534 + ], + "type": "text", + "content": "In-Context Learning Findings 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 329, + 544, + 538, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 544, + 538, + 580 + ], + "spans": [ + { + "bbox": [ + 329, + 544, + 538, + 580 + ], + "type": "text", + "content": "In-context learning supports generalization to unseen tasks, where providing more in-context examples could lead to more accurate generation." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 605, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 555, + 715 + ], + "type": "text", + "content": "Generalization on unseen tasks. Beyond mitigating task confusion, in-context learning also enables the model to generalize to tasks unseen during training. Fig. 
2 has shown the model can successfully generate frontal faces from side-view images and transfer editing instructions [8] through in-context learning, even though they are not encountered during training. Here, we present additional examples of unseen tasks. For instance, although the model is trained exclusively on image editing tasks involving object addi" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 67, + 178, + 217 + ], + "blocks": [ + { + "bbox": [ + 61, + 67, + 178, + 217 + ], + "lines": [ + { + "bbox": [ + 61, + 67, + 178, + 217 + ], + "spans": [ + { + "bbox": [ + 61, + 67, + 178, + 217 + ], + "type": "image", + "image_path": "4ea29b7e8c65442528e8fbcb8620b04d0255186bc45e1f44ad9998ad16841a57.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 219, + 301, + 262 + ], + "lines": [ + { + "bbox": [ + 59, + 219, + 301, + 262 + ], + "spans": [ + { + "bbox": [ + 59, + 219, + 301, + 262 + ], + "type": "text", + "content": "Task Prompt: In each row, a logical task is demonstrated to achieve [IMAGE2] a high-aesthetic image based on [IMAGE1] an aesthetically pleasing photograph. Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: change the setting to a winter scene. 
<\\editing instruction>" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 185, + 67, + 300, + 217 + ], + "blocks": [ + { + "bbox": [ + 185, + 67, + 300, + 217 + ], + "lines": [ + { + "bbox": [ + 185, + 67, + 300, + 217 + ], + "spans": [ + { + "bbox": [ + 185, + 67, + 300, + 217 + ], + "type": "image", + "image_path": "a3d07fda5632ede382d0cef080fcaa8eead3e5397ac707336b1f1b0e9833199d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 309, + 67, + 428, + 219 + ], + "blocks": [ + { + "bbox": [ + 309, + 67, + 428, + 219 + ], + "lines": [ + { + "bbox": [ + 309, + 67, + 428, + 219 + ], + "spans": [ + { + "bbox": [ + 309, + 67, + 428, + 219 + ], + "type": "image", + "image_path": "987f48cc4fce95f082ca19623a70e2a6cdfc2c6aaacc2e358a28b72a09978326.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 220, + 550, + 262 + ], + "lines": [ + { + "bbox": [ + 309, + 220, + 550, + 262 + ], + "spans": [ + { + "bbox": [ + 309, + 220, + 550, + 262 + ], + "type": "text", + "content": "Task Prompt: In each row, a logical task is demonstrated to achieve [IMAGE2] a high-aesthetic image based on [IMAGE1] an aesthetically pleasing photograph. Each row shows a process to edit the image with the given editing instruction. The editing instruction in the last row is: turn the color of sunglasses to green. 
" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 434, + 66, + 549, + 219 + ], + "blocks": [ + { + "bbox": [ + 434, + 66, + 549, + 219 + ], + "lines": [ + { + "bbox": [ + 434, + 66, + 549, + 219 + ], + "spans": [ + { + "bbox": [ + 434, + 66, + 549, + 219 + ], + "type": "image", + "image_path": "ff981a1756b7d4668df52429e2d3439d06ab30300cd9a9f1a69f32855e7aac24.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 59, + 307, + 171, + 449 + ], + "blocks": [ + { + "bbox": [ + 55, + 266, + 555, + 300 + ], + "lines": [ + { + "bbox": [ + 55, + 266, + 555, + 300 + ], + "spans": [ + { + "bbox": [ + 55, + 266, + 555, + 300 + ], + "type": "text", + "content": "Figure 7. Unseen Tasks: Although the image editing tasks seen by the model are only about object addition and object removal, it can still generalize to other types of editing tasks, such as environment modification (Left) and attribute transformation (Right), through in-context learning. More unseen tasks are shown in Fig. 2." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 59, + 307, + 171, + 449 + ], + "lines": [ + { + "bbox": [ + 59, + 307, + 171, + 449 + ], + "spans": [ + { + "bbox": [ + 59, + 307, + 171, + 449 + ], + "type": "image", + "image_path": "15e8bf13c9a2330b000578b4431ba8c8b856240daefb6265a36cbff561e2c67d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 178, + 307, + 292, + 449 + ], + "blocks": [ + { + "bbox": [ + 178, + 307, + 292, + 449 + ], + "lines": [ + { + "bbox": [ + 178, + 307, + 292, + 449 + ], + "spans": [ + { + "bbox": [ + 178, + 307, + 292, + 449 + ], + "type": "image", + "image_path": "a8cebf66282c6a6d1bb8700a95d9f356e6055f9f9410deb217518b384a3a6b78.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 455, + 295, + 499 + ], + "lines": [ + { + "bbox": [ + 55, + 455, + 295, + 499 + ], + "spans": [ + { + "bbox": [ + 55, + 455, + 295, + 499 + ], + "type": "text", + "content": "Figure 8. Unseen Tasks: VisualCloze is capable of performing multi-subject driven generation [70], even though the model was only exposed to single subject-driven generation tasks during training. Best viewed by zooming in." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 529, + 295, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 529, + 295, + 625 + ], + "spans": [ + { + "bbox": [ + 55, + 529, + 295, + 625 + ], + "type": "text", + "content": "tion and removal, it still generalizes to other types of editing tasks, such as environment changes and attribute modifications, as shown in Fig. 7. Furthermore, as demonstrated in Fig. 8, the model, trained solely on single-subject generation, can generate images preserving identities of multiple subjects. 
These results highlight that in-context learning is an effective guidance mechanism, enabling adaptation to novel tasks without retraining." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 72, + 643, + 201, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 643, + 201, + 656 + ], + "spans": [ + { + "bbox": [ + 72, + 643, + 201, + 656 + ], + "type": "text", + "content": "In-Context Learning Findings 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 665, + 280, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 665, + 280, + 701 + ], + "spans": [ + { + "bbox": [ + 70, + 665, + 280, + 701 + ], + "type": "text", + "content": "In-context learning enables task unification, an unseen strategy that consolidating sub-tasks into a single step and generating intermediate results." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 310, + 555, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 310, + 555, + 477 + ], + "spans": [ + { + "bbox": [ + 313, + 310, + 555, + 477 + ], + "type": "text", + "content": "Multi-task consolidation. Meanwhile, we also find that through in-context learning, we can consolidate multiple tasks into a single execution step, which can be viewed as another form of unseen task. Fig. 3 has shown two examples, where we 1) merge conditional generation and relighting shown on the left and 2) perform depth estimation, surface normal estimation, and edge detection simultaneously shown on the right. Similarly, Fig. 11 illustrates how we can combine multiple conditions for conditional generation to achieve finer control. For instance, generating a portrait based on keypoints provides only rough information about the location and body pose. In such cases, contour conditions can be used to control the attributes of other visual elements." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 330, + 498, + 460, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 498, + 460, + 511 + ], + "spans": [ + { + "bbox": [ + 330, + 498, + 460, + 511 + ], + "type": "text", + "content": "In-Context Learning Findings 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 329, + 520, + 538, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 520, + 538, + 568 + ], + "spans": [ + { + "bbox": [ + 329, + 520, + 538, + 568 + ], + "type": "text", + "content": "Different in-context learning examples lead to varying effects, where examples that can better convey mission intent can achieve better and more stable generation." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 605, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 555, + 715 + ], + "type": "text", + "content": "Varying effects of different in-context examples. Following prior works [46, 52] on the prompt selection, we also find that different in-context examples could impact the generation quality. Specifically, it is crucial that in-context examples provide correct and strong guidance about the task intention. For example, as shown in Fig. 10 (left), when the side faces are more towards the front than in Fig. 10 (right), the success rate of correctly generating frontal faces has dropped dramatically." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 71, + 135, + 129 + ], + "blocks": [ + { + "bbox": [ + 62, + 77, + 74, + 183 + ], + "lines": [ + { + "bbox": [ + 62, + 77, + 74, + 183 + ], + "spans": [ + { + "bbox": [ + 62, + 77, + 74, + 183 + ], + "type": "text", + "content": "Two In-Context Examples" + } + ] + } + ], + "index": 0, + "angle": 270, + "type": "image_caption" + }, + { + "bbox": [ + 76, + 71, + 135, + 129 + ], + "lines": [ + { + "bbox": [ + 76, + 71, + 135, + 129 + ], + "spans": [ + { + "bbox": [ + 76, + 71, + 135, + 129 + ], + "type": "image", + "image_path": "e39dbb03405300be8ff7301a386eae5438f35df95ee9616ca7db3a89cd56f42c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 138, + 71, + 197, + 129 + ], + "blocks": [ + { + "bbox": [ + 138, + 71, + 197, + 129 + ], + "lines": [ + { + "bbox": [ + 138, + 71, + 197, + 129 + ], + "spans": [ + { + "bbox": [ + 138, + 71, + 197, + 129 + ], + "type": "image", + "image_path": "de94d01ad7f947dfba9731be7dcb4d5870a68b125e991e5c8ac5a7a4d8c2806a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 201, + 71, + 261, + 129 + ], + "blocks": [ + { + "bbox": [ + 201, + 71, + 261, + 129 + ], + "lines": [ + { + "bbox": [ + 201, + 71, + 261, + 129 + ], + "spans": [ + { + "bbox": [ + 201, + 71, + 261, + 129 + ], + "type": "image", + "image_path": "d394f8d589edbf416c6d4cc2f58660a637f157bfe6a4370510efd32b80073ad3.jpg" + } + ] + } + ], + "index": 3, 
+ "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 76, + 130, + 135, + 189 + ], + "blocks": [ + { + "bbox": [ + 76, + 130, + 135, + 189 + ], + "lines": [ + { + "bbox": [ + 76, + 130, + 135, + 189 + ], + "spans": [ + { + "bbox": [ + 76, + 130, + 135, + 189 + ], + "type": "image", + "image_path": "4ee44494de36fcbc77ba11e187af703a9e635c2d744149ecf1d844c025abc15e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 138, + 130, + 198, + 189 + ], + "blocks": [ + { + "bbox": [ + 138, + 130, + 198, + 189 + ], + "lines": [ + { + "bbox": [ + 138, + 130, + 198, + 189 + ], + "spans": [ + { + "bbox": [ + 138, + 130, + 198, + 189 + ], + "type": "image", + "image_path": "fa356cf4486c8e710d3cd2b102a59b17210dd2dd3bf6f93b711f8c0981a2c386.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 201, + 130, + 261, + 189 + ], + "blocks": [ + { + "bbox": [ + 201, + 130, + 261, + 189 + ], + "lines": [ + { + "bbox": [ + 201, + 130, + 261, + 189 + ], + "spans": [ + { + "bbox": [ + 201, + 130, + 261, + 189 + ], + "type": "image", + "image_path": "e7728427762ce951e655fcb0171f69c18b5b831a8664b2f78df2e160c0273f98.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 75, + 191, + 135, + 233 + ], + "blocks": [ + { + "bbox": [ + 75, + 191, + 135, + 233 + ], + "lines": [ + { + "bbox": [ + 75, + 191, + 135, + 233 + ], + "spans": [ + { + "bbox": [ + 75, + 191, + 135, + 233 + ], + "type": "image", + "image_path": "89597e91d21602ff6a99bb7a814a9b6a7ae72aa4a4b9109d0daebd79103fa3bf.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 245, + 271, + 281 + ], + "lines": [ + { + "bbox": [ + 58, + 245, + 271, + 281 + ], + "spans": [ + { + "bbox": [ + 58, + 245, + 271, + 
281 + ], + "type": "text", + "content": "Task Prompt: In each row, a method uses[IMAGE1] gray-shaded depth map with distinct edges, [IMAGE2] Artistically rendered content for generating [IMAGE3] High-definition picture in a unique art style." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 137, + 191, + 198, + 233 + ], + "blocks": [ + { + "bbox": [ + 137, + 191, + 198, + 233 + ], + "lines": [ + { + "bbox": [ + 137, + 191, + 198, + 233 + ], + "spans": [ + { + "bbox": [ + 137, + 191, + 198, + 233 + ], + "type": "image", + "image_path": "85382c353d3a3f90395a83ff0e3ae47e130ef00c2047f726cee366e12d0254f9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 201, + 191, + 261, + 233 + ], + "blocks": [ + { + "bbox": [ + 201, + 191, + 261, + 233 + ], + "lines": [ + { + "bbox": [ + 201, + 191, + 261, + 233 + ], + "spans": [ + { + "bbox": [ + 201, + 191, + 261, + 233 + ], + "type": "image", + "image_path": "42c0b7385159af3b1c2f9afba48f8e34b14069b6109c13e3b2601e3948a070c1.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 280, + 71, + 354, + 129 + ], + "blocks": [ + { + "bbox": [ + 280, + 71, + 354, + 129 + ], + "lines": [ + { + "bbox": [ + 280, + 71, + 354, + 129 + ], + "spans": [ + { + "bbox": [ + 280, + 71, + 354, + 129 + ], + "type": "image", + "image_path": "498e06995dd22674b4ab75b8ab7f3a35d95bd305b7e863f4d09253954c32a39e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 359, + 72, + 417, + 129 + ], + "blocks": [ + { + "bbox": [ + 359, + 72, + 417, + 129 + ], + "lines": [ + { + "bbox": [ + 359, + 72, + 417, + 129 + ], + "spans": [ + { + "bbox": [ + 359, + 72, + 417, + 129 + ], + "type": "image", + "image_path": 
"62c557f0ce9f35dcb3cc7754d95c24e511eefd8b42b59b819beda1a5208aa49f.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 422, + 72, + 481, + 129 + ], + "blocks": [ + { + "bbox": [ + 422, + 72, + 481, + 129 + ], + "lines": [ + { + "bbox": [ + 422, + 72, + 481, + 129 + ], + "spans": [ + { + "bbox": [ + 422, + 72, + 481, + 129 + ], + "type": "image", + "image_path": "2b4a07349a76312f2d32216d6c874a02a3535c0ad37c2908e984eb13cb6b7287.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 486, + 71, + 545, + 129 + ], + "blocks": [ + { + "bbox": [ + 486, + 71, + 545, + 129 + ], + "lines": [ + { + "bbox": [ + 486, + 71, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 486, + 71, + 545, + 129 + ], + "type": "image", + "image_path": "f1b73d86659bf48d8dddbe2eb30df4180c1a1c124db2fc18321bd544c2857b04.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 295, + 130, + 355, + 188 + ], + "blocks": [ + { + "bbox": [ + 295, + 130, + 355, + 188 + ], + "lines": [ + { + "bbox": [ + 295, + 130, + 355, + 188 + ], + "spans": [ + { + "bbox": [ + 295, + 130, + 355, + 188 + ], + "type": "image", + "image_path": "7fb4c0fcd85036ac8c873ead412249e9d545ed172f14473177f6964763a4cab4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 359, + 130, + 417, + 189 + ], + "blocks": [ + { + "bbox": [ + 359, + 130, + 417, + 189 + ], + "lines": [ + { + "bbox": [ + 359, + 130, + 417, + 189 + ], + "spans": [ + { + "bbox": [ + 359, + 130, + 417, + 189 + ], + "type": "image", + "image_path": "2990a4bc71a56d8004954f8fd263cee7d26ac3fa3581635ac306f3f80eda9c2d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", 
+ "bbox": [ + 422, + 130, + 481, + 189 + ], + "blocks": [ + { + "bbox": [ + 422, + 130, + 481, + 189 + ], + "lines": [ + { + "bbox": [ + 422, + 130, + 481, + 189 + ], + "spans": [ + { + "bbox": [ + 422, + 130, + 481, + 189 + ], + "type": "image", + "image_path": "14138013e9b9475875a02acdbd4b44acb2fbf1da5f2bf9232c5038dd69054d61.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 486, + 130, + 545, + 189 + ], + "blocks": [ + { + "bbox": [ + 486, + 130, + 545, + 189 + ], + "lines": [ + { + "bbox": [ + 486, + 130, + 545, + 189 + ], + "spans": [ + { + "bbox": [ + 486, + 130, + 545, + 189 + ], + "type": "image", + "image_path": "0a0b70f5913e7a1add33c44846032d35427d7d7d84ba92ea0890c986c695058a.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 294, + 191, + 355, + 242 + ], + "blocks": [ + { + "bbox": [ + 294, + 191, + 355, + 242 + ], + "lines": [ + { + "bbox": [ + 294, + 191, + 355, + 242 + ], + "spans": [ + { + "bbox": [ + 294, + 191, + 355, + 242 + ], + "type": "image", + "image_path": "f76662e5dc81ba0b4ab6e409674dd2e1e3a76aed9e84f2152cce4ee9785b542b.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 274, + 245, + 551, + 281 + ], + "lines": [ + { + "bbox": [ + 274, + 245, + 551, + 281 + ], + "spans": [ + { + "bbox": [ + 274, + 245, + 551, + 281 + ], + "type": "text", + "content": "Task Prompt: Every row demonstrates how to transform [IMAGE1] an image with vivid details into [IMAGE2] gray-scale depth map with clear object boundaries, [IMAGE3] rgb normal map for bump mapping effects, [IMAGE4] soft-edged map from hed detection through a logical approach." 
+ } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 359, + 191, + 418, + 242 + ], + "blocks": [ + { + "bbox": [ + 359, + 191, + 418, + 242 + ], + "lines": [ + { + "bbox": [ + 359, + 191, + 418, + 242 + ], + "spans": [ + { + "bbox": [ + 359, + 191, + 418, + 242 + ], + "type": "image", + "image_path": "014dfd4d96816ceef7a915c7b8d206169fb9cffce49a5fdf83d9b968c33943a9.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 421, + 191, + 482, + 243 + ], + "blocks": [ + { + "bbox": [ + 421, + 191, + 482, + 243 + ], + "lines": [ + { + "bbox": [ + 421, + 191, + 482, + 243 + ], + "spans": [ + { + "bbox": [ + 421, + 191, + 482, + 243 + ], + "type": "image", + "image_path": "3a00376ddd05ce1a246de4058ad55d3b6f886ddb698692c6e84a05e3651aa199.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 486, + 191, + 545, + 242 + ], + "blocks": [ + { + "bbox": [ + 486, + 191, + 545, + 242 + ], + "lines": [ + { + "bbox": [ + 486, + 191, + 545, + 242 + ], + "spans": [ + { + "bbox": [ + 486, + 191, + 545, + 242 + ], + "type": "image", + "image_path": "22dae11fb3a08b4f3574437e9a2aa7294c551e476b059191448618fdacb60f8e.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 58, + 327, + 168, + 430 + ], + "blocks": [ + { + "bbox": [ + 55, + 285, + 555, + 319 + ], + "lines": [ + { + "bbox": [ + 55, + 285, + 555, + 319 + ], + "spans": [ + { + "bbox": [ + 55, + 285, + 555, + 319 + ], + "type": "text", + "content": "Figure 9. Unseen Tasks: Through in-context learning, we can perform reverse generation from targets to conditions. 
For example, (a) decomposing the layout and style from a stylized image and (b) inferring the image, depth, and surface normal simultaneously from an edge map, which is the reverse task of Fig. 3 (Left)." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 58, + 327, + 168, + 430 + ], + "lines": [ + { + "bbox": [ + 58, + 327, + 168, + 430 + ], + "spans": [ + { + "bbox": [ + 58, + 327, + 168, + 430 + ], + "type": "image", + "image_path": "e26d101199eaf66301f550024e7b7339d17cc4b47263a479a79c314e8bd86ac4.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 176, + 327, + 287, + 430 + ], + "blocks": [ + { + "bbox": [ + 176, + 327, + 287, + 430 + ], + "lines": [ + { + "bbox": [ + 176, + 327, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 176, + 327, + 287, + 430 + ], + "type": "image", + "image_path": "980324990f838bb09af21c545c77b6a3c430fbecb9f7cb3a9d273e98971dd01f.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 58, + 441, + 171, + 491 + ], + "blocks": [ + { + "bbox": [ + 58, + 441, + 171, + 491 + ], + "lines": [ + { + "bbox": [ + 58, + 441, + 171, + 491 + ], + "spans": [ + { + "bbox": [ + 58, + 441, + 171, + 491 + ], + "type": "image", + "image_path": "0b92ba007fe2e0cfea911db6cb5b8efc794bf013ec60d64dbfbae9c61a97329d.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 502, + 295, + 545 + ], + "lines": [ + { + "bbox": [ + 55, + 502, + 295, + 545 + ], + "spans": [ + { + "bbox": [ + 55, + 502, + 295, + 545 + ], + "type": "text", + "content": "Figure 10. Illustration of the impact of different in-context examples on in-context learning. In the second example on the left, the left and right faces are too biased towards the front, so they do not show the core goal of the task intention." 
+ } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 179, + 441, + 288, + 490 + ], + "blocks": [ + { + "bbox": [ + 179, + 441, + 288, + 490 + ], + "lines": [ + { + "bbox": [ + 179, + 441, + 288, + 490 + ], + "spans": [ + { + "bbox": [ + 179, + 441, + 288, + 490 + ], + "type": "image", + "image_path": "db55729e9d4318e9841efd8e93703898949c599ced5c6934a32bdbe22fd9345e.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "bbox": [ + 72, + 569, + 201, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 569, + 201, + 582 + ], + "spans": [ + { + "bbox": [ + 72, + 569, + 201, + 582 + ], + "type": "text", + "content": "In-Context Learning Findings 5" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 70, + 590, + 280, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 590, + 280, + 627 + ], + "spans": [ + { + "bbox": [ + 70, + 590, + 280, + 627 + ], + "type": "text", + "content": "In-context learning can guide bilateral generation, even for the reverse process that is unseen during training." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 55, + 654, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 296, + 713 + ], + "type": "text", + "content": "Bilateral generation. In addition to generating the target from a set of given conditions, our model also shows the capability of reverse generation, i.e., inferring the underlying conditions from the target. 
Although our model has randomly treated one condition image as the target when" + } + ] + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 322, + 327, + 393, + 399 + ], + "blocks": [ + { + "bbox": [ + 322, + 327, + 393, + 399 + ], + "lines": [ + { + "bbox": [ + 322, + 327, + 393, + 399 + ], + "spans": [ + { + "bbox": [ + 322, + 327, + 393, + 399 + ], + "type": "image", + "image_path": "80e71d879c8601d4e472bffb296a35879303834a95416596e2faa6f860bb7464.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 399, + 328, + 468, + 398 + ], + "blocks": [ + { + "bbox": [ + 399, + 328, + 468, + 398 + ], + "lines": [ + { + "bbox": [ + 399, + 328, + 468, + 398 + ], + "spans": [ + { + "bbox": [ + 399, + 328, + 468, + 398 + ], + "type": "image", + "image_path": "35aef2ef545a9a8891b9ac59dd5f26762d506a0b5d07236fa202dbfa634040af.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 475, + 328, + 545, + 398 + ], + "blocks": [ + { + "bbox": [ + 475, + 328, + 545, + 398 + ], + "lines": [ + { + "bbox": [ + 475, + 328, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 475, + 328, + 545, + 398 + ], + "type": "image", + "image_path": "c3a5f4d20b1af78021b3d3cd67f5d643151115213ced5cfbaf30a97185d7c53f.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 321, + 400, + 394, + 474 + ], + "blocks": [ + { + "bbox": [ + 321, + 400, + 394, + 474 + ], + "lines": [ + { + "bbox": [ + 321, + 400, + 394, + 474 + ], + "spans": [ + { + "bbox": [ + 321, + 400, + 394, + 474 + ], + "type": "image", + "image_path": "f2d484af116d0c9a2212612f63e0c234e814fabe0c58948af994a5cc1b020c38.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 475, + 548, + 512 + ], + "lines": [ + { + "bbox": [ + 318, + 475, 
+ 548, + 512 + ], + "spans": [ + { + "bbox": [ + 318, + 475, + 548, + 512 + ], + "type": "text", + "content": "Task Prompt: Every row demonstrates how to transform [IMAGE1] human pose with colored lines for bone structure and [IMAGE2] canny map with sharp white edges and dark into [IMAGE3] a visually striking and clear picture through a logical approach." + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 313, + 522, + 555, + 556 + ], + "lines": [ + { + "bbox": [ + 313, + 522, + 555, + 556 + ], + "spans": [ + { + "bbox": [ + 313, + 522, + 555, + 556 + ], + "type": "text", + "content": "Figure 11. Unseen Tasks: Unseen combinations of multiple tasks. For conditional generation, we integrate multiple conditions achieve more precise control. More examples are shown in Fig. 3." + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 398, + 400, + 470, + 474 + ], + "blocks": [ + { + "bbox": [ + 398, + 400, + 470, + 474 + ], + "lines": [ + { + "bbox": [ + 398, + 400, + 470, + 474 + ], + "spans": [ + { + "bbox": [ + 398, + 400, + 470, + 474 + ], + "type": "image", + "image_path": "28630a84114fa8d401e0e3eaea7021dcfa6d6f65411c23b243684d03f78786c9.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 474, + 400, + 547, + 473 + ], + "blocks": [ + { + "bbox": [ + 474, + 400, + 547, + 473 + ], + "lines": [ + { + "bbox": [ + 474, + 400, + 547, + 473 + ], + "spans": [ + { + "bbox": [ + 474, + 400, + 547, + 473 + ], + "type": "image", + "image_path": "93b3d7c15a34d4cea9903e6ba78d973fed980a709027e508c9d31569037fbc3e.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "spans": [ + { + 
"bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "content": "training as described in Sec. 4.2, it can generalize to a more challenging and unseen setting during inference, i.e., inferring all conditional images from only the target image. For instance, as illustrated in Fig. 9 (left), the model can reverse-engineer both the original and the style reference images given a stylized image, demonstrating the ability to disentangle the content and style representations. Similarly, as shown in Fig. 9 (right), the model can generate the corresponding real image, depth estimation, and surface normal estimation from an edge image, representing the inverse task of Fig. 3 (left). The ability to perform such" + } + ] + } + ], + "index": 41 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 42 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 70, + 555, + 383 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 555, + 383 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 555, + 383 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 555, + 383 + ], + "type": "table", + "html": "
ConditionMethodContextControllabilityQualityText Consistency
F1 ↑RMSE ↓FID [23] ↓SSIM ↑MAN-IQA [75] ↑MUSIQ [30] ↑CLIP-Score [49] ↑
CannyControlNet [80]0.13-46.060.340.3145.4534.10
OminiControl [61]0.47-29.580.610.4461.4034.40
OneDiffusion [35]0.39-32.760.550.4659.9934.99
OmniGen [71]0.43-51.580.470.4762.6633.66
Oursdev00.39-30.360.610.4861.1335.03
Oursfill00.35-30.600.550.4964.3934.98
Oursfill10.36-31.340.550.4964.1234.96
Oursfill20.36-31.150.560.4964.0834.85
DepthControlNet [80]-23.7036.830.410.4460.1734.49
OminiControl [61]-21.4436.230.520.4460.1834.08
OneDiffusion [35]-10.3539.030.490.4960.4934.71
OmniGen [71]-15.0786.080.260.4964.9029.72
Oursdev0-25.0642.140.530.4658.9534.80
Oursfill0-10.3133.880.540.4864.8535.10
Oursfill1-9.9134.440.540.4964.3234.95
Oursfill2-9.6834.880.540.4864.2934.89
DeblurControlNet [80]-37.8253.280.490.4561.9233.80
OminiControl [61]-19.7026.170.850.4560.7034.53
OneDiffusion [35]-------
OmniGen [71]-------
Oursdev0-25.0356.760.740.3846.6833.52
Oursfill0-26.5340.590.740.4659.6234.56
Oursfill1-25.8736.930.760.4861.5834.82
Oursfill2-25.5736.280.760.4861.7734.82
", + "image_path": "3490ba6341fe53dae081576bfafd9498c242ee534c5c93aff12f356ab80d5505.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 65, + 422, + 288, + 536 + ], + "blocks": [ + { + "bbox": [ + 55, + 391, + 555, + 415 + ], + "lines": [ + { + "bbox": [ + 55, + 391, + 555, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 391, + 555, + 415 + ], + "type": "text", + "content": "Table 1. Quantitative comparison on conditioning generation and image restoration. The methods that train a specialist for each task are marked as gray color. Except for these methods, the best method is bolded, and the second best method is underlined." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 65, + 422, + 288, + 536 + ], + "lines": [ + { + "bbox": [ + 65, + 422, + 288, + 536 + ], + "spans": [ + { + "bbox": [ + 65, + 422, + 288, + 536 + ], + "type": "table", + "html": "
MethodContextDINOv2CLIP-ICLIP-T
OminiControl [61]73.1787.7033.53
OneDiffusion [35]73.8886.9134.85
OmniGen [71]67.7383.4334.53
Oursdev078.0587.6835.06
Oursfill080.4189.6335.16
Oursfill179.3389.2235.02
Oursfill280.3289.3635.01
", + "image_path": "2973fc1c963afae2723902ff6deb0c428c16ec2f262084265fd41eb908c04ca6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 612, + 295, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 612, + 295, + 649 + ], + "spans": [ + { + "bbox": [ + 55, + 612, + 295, + 649 + ], + "type": "text", + "content": "reverse tasks highlights the flexibility and robustness in understanding complex relationships between different types of image representations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 658, + 141, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 658, + 141, + 671 + ], + "spans": [ + { + "bbox": [ + 55, + 658, + 141, + 671 + ], + "type": "text", + "content": "5.2. Main Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 677, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 715 + ], + "type": "text", + "content": "We compare our method with universal generative models, including OmniGen [71] and OneDiffusion [35], as well as specialized models, such as ControlNet [80] and Omni-" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 318, + 422, + 552, + 496 + ], + "blocks": [ + { + "bbox": [ + 55, + 545, + 295, + 590 + ], + "lines": [ + { + "bbox": [ + 55, + 545, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 55, + 545, + 295, + 590 + ], + "type": "text", + "content": "Table 2. Quantitative comparison for subject-driven image generation. We report clip scores on text alignment and style consistency. Specialists are shaded in gray. Among the remaining methods, the best is emphasized in bold, while the second best is underlined." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 422, + 552, + 496 + ], + "lines": [ + { + "bbox": [ + 318, + 422, + 552, + 496 + ], + "spans": [ + { + "bbox": [ + 318, + 422, + 552, + 496 + ], + "type": "table", + "html": "
text↑image↑
InstantStyle [64]0.270.60
OmniGen [71]0.270.52
Oursdev0.300.53
Oursfill0.290.55
", + "image_path": "a120bae33646642c1ee693c6f60b898e6ee0090f7d5c6d025c78f8129c24495c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 504, + 555, + 550 + ], + "lines": [ + { + "bbox": [ + 313, + 504, + 555, + 550 + ], + "spans": [ + { + "bbox": [ + 313, + 504, + 555, + 550 + ], + "type": "text", + "content": "Table 3. Quantitative comparison for style transfer. We report CLIP scores on text alignment and style consistency. The specialists are indicated in gray. Among the others, the top-performing one is highlighted in bold, and the second best is underlined." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 566, + 555, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 566, + 555, + 628 + ], + "spans": [ + { + "bbox": [ + 313, + 566, + 555, + 628 + ], + "type": "text", + "content": "Control [61]. The details of the evaluation metrics are provided in Appendix C. Additionally, we fine-tune FLUX.1-dev [33] using the same settings as FLUX.1-Fill-dev for comparison and refer to the tuned models as Oursdev and Oursfill. The details of Oursdev are shown in Appendix B." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "type": "text", + "content": "For conditional generation and image restoration, we evaluate the models based on three criteria, i.e., controllability, visual quality, and text consistency, following the evaluation approach of OminiControl [61]. As shown in Tab. 1, our framework demonstrates comparable controllability to existing universal methods while achieving superior visual quality and text consistency. 
Compared to spe" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 67, + 294, + 216 + ], + "blocks": [ + { + "bbox": [ + 56, + 67, + 294, + 216 + ], + "lines": [ + { + "bbox": [ + 56, + 67, + 294, + 216 + ], + "spans": [ + { + "bbox": [ + 56, + 67, + 294, + 216 + ], + "type": "image", + "image_path": "66de4842ae368391271cd943215047905ba91ce955a55fbb50f891d087e2be07.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 228, + 295, + 251 + ], + "lines": [ + { + "bbox": [ + 55, + 228, + 295, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 228, + 295, + 251 + ], + "type": "text", + "content": "Figure 12. Comparison between Flux.1-dev (Oursdev) and Flux.1-Fill-dev (Oursfill)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 271, + 295, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 271, + 295, + 295 + ], + "spans": [ + { + "bbox": [ + 55, + 271, + 295, + 295 + ], + "type": "text", + "content": "cialized methods, our model performs on par with the best results and even outperforms them on the depth-to-image." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 296, + 295, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 296, + 295, + 379 + ], + "spans": [ + { + "bbox": [ + 55, + 296, + 295, + 379 + ], + "type": "text", + "content": "In the style transfer task, we measure text consistency and style alignment using the CLIP [49] model. As reported in Tab. 
3, our method outperforms OmniGen [71] by " + }, + { + "bbox": [ + 55, + 296, + 295, + 379 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 55, + 296, + 295, + 379 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 296, + 295, + 379 + ], + "type": "inline_equation", + "content": "3\\%" + }, + { + "bbox": [ + 55, + 296, + 295, + 379 + ], + "type": "text", + "content": " in text alignment and style consistency, respectively. Even when compared with InstantStyle-Plus [81], a specialized model, we achieve a " + }, + { + "bbox": [ + 55, + 296, + 295, + 379 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 55, + 296, + 295, + 379 + ], + "type": "text", + "content": " improvement in text consistency, with only a slight decrease in style alignment." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 380, + 295, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 380, + 295, + 463 + ], + "spans": [ + { + "bbox": [ + 55, + 380, + 295, + 463 + ], + "type": "text", + "content": "Furthermore, we evaluate the models on subject-driven image generation and report semantic alignment using the DINOv2 [47], CLIP-I [49], and CLIP-T [49] scores. Across all these metrics, our method consistently delivers improvements, as shown in Tab. 2. 
For example, compared to the specialized model OminiControl [61], we achieve improvements of " + }, + { + "bbox": [ + 55, + 380, + 295, + 463 + ], + "type": "inline_equation", + "content": "7.15\\%" + }, + { + "bbox": [ + 55, + 380, + 295, + 463 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 380, + 295, + 463 + ], + "type": "inline_equation", + "content": "1.66\\%" + }, + { + "bbox": [ + 55, + 380, + 295, + 463 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 380, + 295, + 463 + ], + "type": "inline_equation", + "content": "1.48\\%" + }, + { + "bbox": [ + 55, + 380, + 295, + 463 + ], + "type": "text", + "content": " in these three scores." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 474, + 295, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 474, + 295, + 581 + ], + "spans": [ + { + "bbox": [ + 55, + 474, + 295, + 581 + ], + "type": "text", + "content": "Advantages of the infilling model. Our method (Oursfill) is built on FLUX.1-Fill-dev [33], which shares the same objective as our unified image generation framework. To verify its effectiveness, we also fine-tune Fill.1-dev [33] (Oursdev) using identical settings. Unlike Oursfill, which requires no modifications, Oursdev necessitates model adaptations for universal image generation, as shown in Appendix B. Despite its simplicity, Oursfill achieves superior performance across multiple tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": "As shown in Tab. 
1, " + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\text{Ours}_{\\text{dev}}" + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": " achieves a higher F1 score than " + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\text{Ours}_{\\text{fill}}" + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": " in the canny-to-image generation. However, in other tasks, " + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\text{Ours}_{\\text{fill}}" + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": " demonstrates a significant advantage. For instance, in the depth-to-image generation, " + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\text{Ours}_{\\text{fill}}" + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": " reduces RMSE from 25.06 to 10.31. In the deblurring task, " + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\text{Ours}_{\\text{fill}}" + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": " achieves superior quality by lowering RMSE while maintaining a higher SSIM. In subject-driven image generation, Tab. 2 shows that " + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\text{Ours}_{\\text{fill}}" + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": " consistently outperforms " + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\text{Ours}_{\\text{dev}}" + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": ". 
Additionally, in semantic-invariant style transfer, " + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\text{Ours}_{\\text{fill}}" + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": " delivers comparable performance to " + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\text{Ours}_{\\text{dev}}" + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": ", as shown in Tab. 3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": "Fig. 12 presents a visual comparison, where Oursfill demonstrates clear advantages over Oursdev. Notably, in the depth-to-image generation, images produced by Oursdev frequently exhibit diagonal streak artifacts, which significantly degrade visual fidelity. Considering the advantages in performance, visual quality, and architectural efficiency, Oursfill stands out as the superior model." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 167, + 555, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 167, + 555, + 334 + ], + "spans": [ + { + "bbox": [ + 313, + 167, + 555, + 334 + ], + "type": "text", + "content": "Quantitative comparison on in-context learning. Here, we further analyze the impact of in-context learning on seen tasks. Tab. 1 demonstrates the impact of in-context learning on different image generation tasks. Under the canny condition, our method without in-context examples achieves an FID of 30.60, which improves to 31.15 with two in-context examples. When conditioned on depth, the RMSE decreases from 10.31 to 9.68 as the number of in-context examples increases, indicating enhanced structural consistency. 
Similarly, in the deblurring task, RMSE decreases from 26.53 to 25.57, reflecting improved fidelity to the original content. These results highlight in-context learning as an effective guidance mechanism, enabling the model to better align with the task intent." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 346, + 389, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 346, + 389, + 357 + ], + "spans": [ + { + "bbox": [ + 313, + 346, + 389, + 357 + ], + "type": "text", + "content": "6. Limitations" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 365, + 554, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 365, + 554, + 461 + ], + "spans": [ + { + "bbox": [ + 313, + 365, + 554, + 461 + ], + "type": "text", + "content": "While our model demonstrates strong stability across most in-domain tasks, it still exhibits some instability in specific tasks, such as object removal. This limitation suggests that the performance is sensitive to certain task characteristics. Additionally, the stability of the model on unseen tasks is still insufficient. Apart from the difficulty of the task and the difference with seen tasks, ambiguous in-context examples may also lead to less stable results, as discussed in Sec. 5.1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 478, + 388, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 478, + 388, + 491 + ], + "spans": [ + { + "bbox": [ + 313, + 478, + 388, + 491 + ], + "type": "text", + "content": "7. 
Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "text", + "content": "In this work, we propose VisualCloze, a universal image generation framework that addresses key challenges in existing methods, including generalizable instruction design, appropriate task distributions, and unified architectural design. Rather than relying solely on language-based instructions to convey task intent, we re-propose visual in-context learning, enabling the model to learn tasks from a few demonstrations. This approach improves generalization to unseen tasks and reduces task ambiguity. To overcome the sparsity of visual task distributions, which limits the learning of transferable knowledge, we construct Graph200K, a graph-structured dataset that establishes interrelated tasks. In this compact task space, the model is promoted to learn transferable representations and improve adaptability. Meanwhile, we identify the consistent objective between image infilling and our universal generation formulation, allowing us to seamlessly adapt general-purpose infilling models for universal generation without" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 297, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 297, + 120 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 297, + 120 + ], + "type": "text", + "content": "architectural modifications. 
Experimental results show that our approach supports a diverse set of in-domain tasks using in-context learning while demonstrating strong generalization to unseen tasks." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 131, + 115, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 131, + 115, + 144 + ], + "spans": [ + { + "bbox": [ + 56, + 131, + 115, + 144 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 152, + 295, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 61, + 152, + 295, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 152, + 295, + 260 + ], + "spans": [ + { + "bbox": [ + 61, + 152, + 295, + 260 + ], + "type": "text", + "content": "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andrew Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning. In NeurIPS, 2022. 3, 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 262, + 295, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 262, + 295, + 293 + ], + "spans": [ + { + "bbox": [ + 62, + 262, + 295, + 293 + ], + "type": "text", + "content": "[2] Michael Samuel Albergo and Eric Vanden-Eijnden. Building normalizing flows with stochastic interpolants. In ICLR, 2023. 
4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 297, + 294, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 297, + 294, + 328 + ], + "spans": [ + { + "bbox": [ + 62, + 297, + 294, + 328 + ], + "type": "text", + "content": "[3] Ivana Balazevic, David Steiner, Nikhil Parthasarathy, Relja Arandjelovic, and Olivier J Henaff. Towards in-context scene understanding. In NeurIPS, 2023. 3, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 331, + 294, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 331, + 294, + 363 + ], + "spans": [ + { + "bbox": [ + 62, + 331, + 294, + 363 + ], + "type": "text", + "content": "[4] Amir Bar, Yossi Gandelsman, Trevor Darrell, Amir Globerson, and Alexei A Efros. Visual prompting via image inpainting. In NeurIPS, 2022. 3, 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 365, + 294, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 365, + 294, + 407 + ], + "spans": [ + { + "bbox": [ + 62, + 365, + 294, + 407 + ], + "type": "text", + "content": "[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. *NeurIPS*, 2020. 2, 4, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 410, + 294, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 410, + 294, + 430 + ], + "spans": [ + { + "bbox": [ + 62, + 410, + 294, + 430 + ], + "type": "text", + "content": "[6] John Canny. A computational approach to edge detection. IEEE TPAMI, 1986. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 432, + 294, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 432, + 294, + 464 + ], + "spans": [ + { + "bbox": [ + 62, + 432, + 294, + 464 + ], + "type": "text", + "content": "[7] Z. Cao, G. 
Hidalgo Martinez, T. Simon, S. Wei, and Y. A. Sheikh. Openpose: Realtime multi-person 2d pose estimation using part affinity fields. IEEE TPAMI, 2019. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 467, + 294, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 467, + 294, + 498 + ], + "spans": [ + { + "bbox": [ + 62, + 467, + 294, + 498 + ], + "type": "text", + "content": "[8] Lan Chen, Qi Mao, Yuchao Gu, and Mike Zheng Shou. Edit transfer: Learning image editing via vision in-context relations. arXiv preprint arXiv:2503.13327, 2025. 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 501, + 294, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 501, + 294, + 564 + ], + "spans": [ + { + "bbox": [ + 62, + 501, + 294, + 564 + ], + "type": "text", + "content": "[9] Xi Chen, Zhifei Zhang, He Zhang, Yuqian Zhou, Soo Ye Kim, Qing Liu, Yijun Li, Jianming Zhang, Nanxuan Zhao, Yilin Wang, Hui Ding, Zhe Lin, and Hengshuang. Unireal: Universal image generation and editing via learning real-world dynamics. arXiv preprint arXiv:2412.07774, 2024. 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 567, + 295, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 567, + 295, + 609 + ], + "spans": [ + { + "bbox": [ + 57, + 567, + 295, + 609 + ], + "type": "text", + "content": "[10] Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In ICML, 2018. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 613, + 295, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 613, + 295, + 644 + ], + "spans": [ + { + "bbox": [ + 57, + 613, + 295, + 644 + ], + "type": "text", + "content": "[11] Seunghwan Choi, Sunghyun Park, Minsoo Lee, and Jaegul Choo. 
Viton-hd: High-resolution virtual try-on via misalignment-aware normalization. In CVPR, 2021. 2, 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 647, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 647, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 57, + 647, + 295, + 689 + ], + "type": "text", + "content": "[12] Zheng Chong, Xiao Dong, Haoxiang Li, shiyue Zhang, Wenqing Zhang, Hanqing Zhao, xujie zhang, Dongmei Jiang, and Xiaodan Liang. CatVTON: Concatenation is all you need for virtual try-on with diffusion models. In ICLR, 2025. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 691, + 295, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 691, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 691, + 295, + 712 + ], + "type": "text", + "content": "[13] Prafulla Dhariwal and Alexander Quinn Nichol. Diffusion models beat GANs on image synthesis. In NeurIPS, 2021. 4" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 712 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 317, + 73, + 553, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 73, + 553, + 126 + ], + "spans": [ + { + "bbox": [ + 317, + 73, + 553, + 126 + ], + "type": "text", + "content": "[14] Qingxiu Dong, Lei Li, Damai Dai, Ce Zheng, Jingyuan Ma, Rui Li, Heming Xia, Jingjing Xu, Zhiyong Wu, Tianyu Liu, Baobao Chang, Xu Sun, Lei Li, and Zhifang Sui. A survey on in-context learning. arXiv preprint arXiv:2301.00234, 2024. 
4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 129, + 553, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 129, + 553, + 205 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 553, + 205 + ], + "type": "text", + "content": "[15] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, Dustin Podell, Tim Dockhorn, Zion English, Kyle Lacey, Alex Goodwin, Yannik Marek, and Robin Rombach. Scaling rectified flow transformers for high-resolution image synthesis. arXiv preprint arXiv:2403.03206, 2024. 2, 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 207, + 553, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 207, + 553, + 239 + ], + "spans": [ + { + "bbox": [ + 316, + 207, + 553, + 239 + ], + "type": "text", + "content": "[16] Christopher Fifty, Ehsan Amid, Zhe Zhao, Tianhe Yu, Rohan Anil, and Chelsea Finn. Efficiently identifying task groupings for multi-task learning. In NeurIPS, 2021. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 242, + 553, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 242, + 553, + 304 + ], + "spans": [ + { + "bbox": [ + 317, + 242, + 553, + 304 + ], + "type": "text", + "content": "[17] Peng Gao, Le Zhuo, Dongyang Liu, Ruoyi Du, Xu Luo, Longtian Qiu, Yuhang Zhang, Chen Lin, Rongjie Huang, Shijie Geng, et al. Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers. arXiv preprint arXiv:2405.05945, 2024. 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 308, + 553, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 308, + 553, + 350 + ], + "spans": [ + { + "bbox": [ + 316, + 308, + 553, + 350 + ], + "type": "text", + "content": "[18] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. 
Mdtv2: Masked diffusion transformer is a strong image synthesizer. arXiv preprint arXiv:2303.14389, 2024. 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 354, + 553, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 354, + 553, + 385 + ], + "spans": [ + { + "bbox": [ + 316, + 354, + 553, + 385 + ], + "type": "text", + "content": "[19] Golnaz Ghiasi, Barret Zoph, Ekin D. Cubuk, Quoc V. Le, and Tsung-Yi Lin. Multi-task self-training for learning general representations. In ICCV, 2021. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 388, + 553, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 388, + 553, + 419 + ], + "spans": [ + { + "bbox": [ + 316, + 388, + 553, + 419 + ], + "type": "text", + "content": "[20] Geonmo Gu, Byungsoo Ko, SeoungHyun Go, Sung-Hyun Lee, Jingeun Lee, and Minchul Shin. Towards light-weight and real-time line segment detection. In AAAI, 2022. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 422, + 553, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 422, + 553, + 453 + ], + "spans": [ + { + "bbox": [ + 316, + 422, + 553, + 453 + ], + "type": "text", + "content": "[21] Aaron Hertzmann. Algorithms for rendering in artistic styles. PhD thesis, New York University, Graduate School of Arts and Science, 2001. 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 456, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 456, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 317, + 456, + 553, + 498 + ], + "type": "text", + "content": "[22] Aaron Hertzmann, Charles E. Jacobs, Nuria Oliver, Brian Curless, and David H. Salesin. Image analogies. In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, 2001. 
4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 501, + 553, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 501, + 553, + 554 + ], + "spans": [ + { + "bbox": [ + 317, + 501, + 553, + 554 + ], + "type": "text", + "content": "[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 10, 16" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 557, + 553, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 557, + 553, + 578 + ], + "spans": [ + { + "bbox": [ + 317, + 557, + 553, + 578 + ], + "type": "text", + "content": "[24] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In NeurIPS, 2020. 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 580, + 553, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 580, + 553, + 621 + ], + "spans": [ + { + "bbox": [ + 317, + 580, + 553, + 621 + ], + "type": "text", + "content": "[25] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In ICLR, 2022. 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 624, + 553, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 624, + 553, + 667 + ], + "spans": [ + { + "bbox": [ + 316, + 624, + 553, + 667 + ], + "type": "text", + "content": "[26] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Huanzhang Dou, Yupeng Shi, Yutong Feng, Chen Liang, Yu Liu, and Jingren Zhou. Group diffusion transformers are unsupervised multitask learners. arXiv preprint arxiv:2410.15027, 2024. 
4" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "type": "text", + "content": "[27] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Huanzhang Dou, Chen Liang, Yutong Feng, Yu Liu, and Jingren Zhou. In-context lora for diffusion transformers. arXiv preprint arxiv:2410.23775, 2024. 2, 3" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "text", + "content": "[28] Shijie Huang, Yiren Song, Yuxuan Zhang, Hailong Guo, Xueyin Wang, Mike Zheng Shou, and Jiaming Liu. Photodoodle: Learning artistic image editing from few-shot pairwise data. arXiv preprint arXiv:2502.14397, 2025. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 161 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 161 + ], + "type": "text", + "content": "[29] Zehuan Huang, Yuanchen Guo, Haoran Wang, Ran Yi, Lizhuang Ma, Yan-Pei Cao, and Lu Sheng. Mv-adapter: Multi-view consistent image generation made easy. arXiv preprint arXiv:2412.03632, 2024. 
5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 162, + 294, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 162, + 294, + 205 + ], + "spans": [ + { + "bbox": [ + 56, + 162, + 294, + 205 + ], + "type": "text", + "content": "[30] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5148-5157, 2021. 10, 16" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 206, + 294, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 206, + 294, + 237 + ], + "spans": [ + { + "bbox": [ + 56, + 206, + 294, + 237 + ], + "type": "text", + "content": "[31] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In CVPR, 2018. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 239, + 294, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 239, + 294, + 281 + ], + "spans": [ + { + "bbox": [ + 56, + 239, + 294, + 281 + ], + "type": "text", + "content": "[32] Iasonas Kokkinos. Ethernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In CVPR, 2017. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 282, + 294, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 282, + 294, + 314 + ], + "spans": [ + { + "bbox": [ + 56, + 282, + 294, + 314 + ], + "type": "text", + "content": "[33] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024. 
2, 3, 4, 5, 7, 10, 11, 16" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 316, + 294, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 316, + 294, + 370 + ], + "spans": [ + { + "bbox": [ + 56, + 316, + 294, + 370 + ], + "type": "text", + "content": "[34] Bolin Lai, Felix Juefei-Xu, Miao Liu, Xiaoliang Dai, Nikhil Mehta, Chenguang Zhu, Zeyi Huang, James M Rehg, Sang-min Lee, Ning Zhang, et al. Unleashing in-context learning of autoregressive models for few-shot image manipulation. arXiv preprint arXiv:2412.01027, 2024. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 372, + 294, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 372, + 294, + 414 + ], + "spans": [ + { + "bbox": [ + 56, + 372, + 294, + 414 + ], + "type": "text", + "content": "[35] Duong H. Le, Tuan Pham, Sangho Lee, Christopher Clark, Aniruddha Kembhavi, Stephan Mandt, Ranjay Krishna, and Jiasen Lu. One diffusion to generate them all, 2024. 2, 3, 4, 10" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 415, + 294, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 415, + 294, + 448 + ], + "spans": [ + { + "bbox": [ + 56, + 415, + 294, + 448 + ], + "type": "text", + "content": "[36] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pretrained subject representation for controllable text-to-image generation and editing. In NeurIPS, 2023. 16" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 449, + 294, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 449, + 294, + 491 + ], + "spans": [ + { + "bbox": [ + 56, + 449, + 294, + 491 + ], + "type": "text", + "content": "[37] Kunchang Li, Yali Wang, Junhao Zhang, Peng Gao, Guanglu Song, Yu Liu, Hongsheng Li, and Yu Qiao. Uniformer: Unifying convolution and self-attention for visual recognition. IEEE TPAMI, 2023. 
5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 493, + 294, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 493, + 294, + 536 + ], + "spans": [ + { + "bbox": [ + 56, + 493, + 294, + 536 + ], + "type": "text", + "content": "[38] Zhen Li, Mingdeng Cao, Xintao Wang, Zhongang Qi, MingMing Cheng, and Ying Shan. Photomaker: Customizing realistic human photos via stacked id embedding. In CVPR, 2024. 2, 4, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 537, + 294, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 537, + 294, + 591 + ], + "spans": [ + { + "bbox": [ + 56, + 537, + 294, + 591 + ], + "type": "text", + "content": "[39] Weifeng Lin, Xinyu Wei, Renrui Zhang, Le Zhuo, Shitian Zhao, Siyuan Huang, Junlin Xie, Yu Qiao, Peng Gao, and Hongsheng Li. Pixwizard: Versatile image-to-image visual assistant with open-language instructions. arXiv preprint arXiv:2409.15278, 2024. 2, 5, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 593, + 294, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 593, + 294, + 624 + ], + "spans": [ + { + "bbox": [ + 56, + 593, + 294, + 624 + ], + "type": "text", + "content": "[40] Yaron Lipman, Ricky T. Q. Chen, Heli Ben-Hamu, Maximilian Nickel, and Matthew Le. Flow matching for generative modeling. In ICLR, 2023. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 625, + 294, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 625, + 294, + 679 + ], + "spans": [ + { + "bbox": [ + 56, + 625, + 294, + 679 + ], + "type": "text", + "content": "[41] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining. arXiv preprint arXiv:2408.02657, 2024. 
4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 681, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 681, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 681, + 294, + 713 + ], + "type": "text", + "content": "[42] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 4" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 315, + 72, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 315, + 72, + 553, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 553, + 116 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 553, + 116 + ], + "type": "text", + "content": "[43] Yihao Liu, Xiangyu Chen, Xianzheng Ma, Xintao Wang, Jiantao Zhou, Yu Qiao, and Chao Dong. Unifying image processing as visual prompting question answering. arXiv preprint arXiv:2310.10513, 2023. 3, 4, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 118, + 553, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 118, + 553, + 162 + ], + "spans": [ + { + "bbox": [ + 315, + 118, + 553, + 162 + ], + "type": "text", + "content": "[44] Chaojie Mao, Jingfeng Zhang, Yulin Pan, Zeyinzi Jiang, Zhen Han, Yu Liu, and Jingren Zhou. Ace++: Instruction-based image creation and editing via context-aware content filling. arXiv preprint arXiv:2501.02487, 2025. 2, 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 163, + 553, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 163, + 553, + 206 + ], + "spans": [ + { + "bbox": [ + 315, + 163, + 553, + 206 + ], + "type": "text", + "content": "[45] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. 
Sdedit: Guided image synthesis and editing with stochastic differential equations. arXiv preprint arXiv:2108.01073, 2021. 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 209, + 553, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 209, + 553, + 252 + ], + "spans": [ + { + "bbox": [ + 315, + 209, + 553, + 252 + ], + "type": "text", + "content": "[46] Noor Nashid, Mifta Sintaha, and Ali Mesbah. Retrieval-based prompt selection for code-related few-shot learning. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE), pages 2450-2462. IEEE, 2023. 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 254, + 553, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 254, + 553, + 308 + ], + "spans": [ + { + "bbox": [ + 315, + 254, + 553, + 308 + ], + "type": "text", + "content": "[47] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 11, 16" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 309, + 553, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 309, + 553, + 331 + ], + "spans": [ + { + "bbox": [ + 315, + 309, + 553, + 331 + ], + "type": "text", + "content": "[48] William Peebles and Saining Xie. Scalable diffusion models with transformers. In ICCV, 2023. 4, 16" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 332, + 553, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 332, + 553, + 397 + ], + "spans": [ + { + "bbox": [ + 315, + 332, + 553, + 397 + ], + "type": "text", + "content": "[49] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 
Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 10, 11, 16" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 399, + 553, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 399, + 553, + 475 + ], + "spans": [ + { + "bbox": [ + 315, + 399, + 553, + 475 + ], + "type": "text", + "content": "[50] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, ChaoYuan Wu, Ross Girshick, Piotr Dólar, and Christoph Feichtenhofer. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 477, + 553, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 477, + 553, + 532 + ], + "spans": [ + { + "bbox": [ + 315, + 477, + 553, + 532 + ], + "type": "text", + "content": "[51] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2022. 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 533, + 553, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 533, + 553, + 565 + ], + "spans": [ + { + "bbox": [ + 315, + 533, + 553, + 565 + ], + "type": "text", + "content": "[52] Ohad Rubin, Jonathan Herzig, and Jonathan Berant. Learning to retrieve prompts for in-context learning. arXiv preprint arXiv:2112.08633, 2021. 
8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 315, + 567, + 553, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 567, + 553, + 589 + ], + "spans": [ + { + "bbox": [ + 315, + 567, + 553, + 589 + ], + "type": "text", + "content": "[53] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 590, + 553, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 590, + 553, + 633 + ], + "spans": [ + { + "bbox": [ + 315, + 590, + 553, + 633 + ], + "type": "text", + "content": "[54] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023. 2, 16" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 315, + 635, + 553, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 635, + 553, + 679 + ], + "spans": [ + { + "bbox": [ + 315, + 635, + 553, + 679 + ], + "type": "text", + "content": "[55] Dianmo Sheng, Dongdong Chen, Zhentao Tan, Qiankun Liu, Qi Chu, Jianmin Bao, Tao Gong, Bin Liu, Shengwei Xu, and Nenghai Yu. Towards more unified in-context visual understanding. In CVPR, 2024. 4" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 315, + 681, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 681, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 681, + 553, + 713 + ], + "type": "text", + "content": "[56] Kihyuk Sohn, Nataniel Ruiz, Kimin Lee, Daniel Castro Chin, Irina Blok, Huiwen Chang, Jarred Barber, Lu Jiang, Glenn Entis, Yuanzhen Li, et al. 
Styledrop: Text-to-image" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 295, + 714 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 77, + 73, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 73, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 77, + 73, + 294, + 95 + ], + "type": "text", + "content": "generation in any style. arXiv preprint arXiv:2306.00983, 2023.16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 96, + 295, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 96, + 295, + 129 + ], + "spans": [ + { + "bbox": [ + 57, + 96, + 295, + 129 + ], + "type": "text", + "content": "[57] Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864, 2021. 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 130, + 295, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 130, + 295, + 174 + ], + "spans": [ + { + "bbox": [ + 56, + 130, + 295, + 174 + ], + "type": "text", + "content": "[58] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 
4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 175, + 295, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 175, + 295, + 217 + ], + "spans": [ + { + "bbox": [ + 57, + 175, + 295, + 217 + ], + "type": "text", + "content": "[59] Yanpeng Sun, Qiang Chen, Jian Wang, Jingdong Wang, and Zechao Li. Exploring effective factors for improving visual in-context learning. arXiv preprint arXiv:2304.04748, 2023. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 219, + 295, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 219, + 295, + 262 + ], + "spans": [ + { + "bbox": [ + 56, + 219, + 295, + 262 + ], + "type": "text", + "content": "[60] Yasheng SUN, Yifan Yang, Houwen Peng, Yifei Shen, Yuqing Yang, Han Hu, Lili Qiu, and Hideki Koike. Imagebrush: Learning visual in-context instructions for exemplar-based image manipulation. In NeurIPS, 2023. 3, 4, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 264, + 295, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 264, + 295, + 308 + ], + "spans": [ + { + "bbox": [ + 56, + 264, + 295, + 308 + ], + "type": "text", + "content": "[61] Zhenxiong Tan, Songhua Liu, Xingyi Yang, Qiaochu Xue, and Xinchao Wang. *Omnicontrol: Minimal and universal control for diffusion transformer.* arXiv preprint arXiv:2411.15098, 3, 2024. 4, 5, 6, 10, 11" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 57, + 309, + 270, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 309, + 270, + 320 + ], + "spans": [ + { + "bbox": [ + 57, + 309, + 270, + 320 + ], + "type": "text", + "content": "[62] Paints-Undo Team. Paints-undo github page, 2024. 
5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 322, + 295, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 322, + 295, + 364 + ], + "spans": [ + { + "bbox": [ + 57, + 322, + 295, + 364 + ], + "type": "text", + "content": "[63] Alex Jinpeng Wang, Linjie Li, Yiqi Lin, Min Li, Lijuan Wang, and Mike Zheng Shou. Leveraging visual tokens for extended text contexts in multi-modal learning. NeurIPS, 2024. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 365, + 295, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 295, + 410 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 295, + 410 + ], + "type": "text", + "content": "[64] Haofan Wang, Peng Xing, Renyuan Huang, Hao Ai, Qixun Wang, and Xu Bai. Instantstyle-plus: Style transfer with content-preserving in text-to-image generation. arXiv preprint arXiv:2407.00788, 2024. 2, 4, 5, 10" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 411, + 295, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 411, + 295, + 487 + ], + "spans": [ + { + "bbox": [ + 56, + 411, + 295, + 487 + ], + "type": "text", + "content": "[65] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 488, + 295, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 488, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 56, + 488, + 295, + 521 + ], + "type": "text", + "content": "[66] Xinlong Wang, Wen Wang, Yue Cao, Chunhua Shen, and Tiejun Huang. Images speak in images: A generalist painter for in-context visual learning. 
In CVPR, 2023. 3, 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 522, + 295, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 522, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 56, + 522, + 295, + 555 + ], + "type": "text", + "content": "[67] Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, and Tiejun Huang. Seggpt: Towards segmenting everything in context. In ICCV, 2023. 3, 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 556, + 295, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 556, + 295, + 600 + ], + "spans": [ + { + "bbox": [ + 56, + 556, + 295, + 600 + ], + "type": "text", + "content": "[68] Zhendong Wang, Yifan Jiang, Yadong Lu, yelong shen, Pengcheng He, Weizhu Chen, Zhangyang Wang, and Mingyuan Zhou. In-context learning unlocked for diffusion models. In NeurIPS, 2023. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 601, + 295, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 601, + 295, + 646 + ], + "spans": [ + { + "bbox": [ + 56, + 601, + 295, + 646 + ], + "type": "text", + "content": "[69] Cong Wei, Zheyang Xiong, Weiming Ren, Xinrun Du, Ge Zhang, and Wenhu Chen. Omniedit: Building image editing generalist models through specialist supervision. arXiv preprint arXiv:2411.07199, 2024. 2, 3, 5, 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "type": "text", + "content": "[70] Shaojin Wu, Mengqi Huang, Wenxu Wu, Yufeng Cheng, Fei Ding, and Qian He. Less-to-more generalization: Unlocking more controllability by in-context generation. arXiv preprint arXiv:2504.02160, 2025. 
8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 690, + 295, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 690, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 56, + 690, + 295, + 714 + ], + "type": "text", + "content": "[71] Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "text", + "content": "Zheng Liu. Omnigen: Unified image generation. arXiv preprint arXiv:2409.11340, 2024. 2, 3, 4, 10, 11" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 95, + 553, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 95, + 553, + 116 + ], + "spans": [ + { + "bbox": [ + 316, + 95, + 553, + 116 + ], + "type": "text", + "content": "[72] Saining Xie and Zhuowen Tu. Holistically-nested edge detection. In CVPR, 2015. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 118, + 553, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 118, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 118, + 553, + 150 + ], + "type": "text", + "content": "[73] Haofei Xu, Jing Zhang, Jianfei Cai, Hamid Rezatofighi, Fisher Yu, Dacheng Tao, and Andreas Geiger. Unifying flow, stereo and depth estimation. IEEE TPAMI, 2023. 
5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 151, + 553, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 151, + 553, + 183 + ], + "spans": [ + { + "bbox": [ + 316, + 151, + 553, + 183 + ], + "type": "text", + "content": "[74] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv:2406.09414, 2024. 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 184, + 553, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 184, + 553, + 249 + ], + "spans": [ + { + "bbox": [ + 316, + 184, + 553, + 249 + ], + "type": "text", + "content": "[75] Sidi Yang, Tianhe Wu, Shuwei Shi, Shanshan Lao, Yuan Gong, Mingdeng Cao, Jiahao Wang, and Yujiu Yang. Maniaq: Multi-dimension attention network for no-reference image quality assessment. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1191-1200, 2022. 10, 16" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 250, + 553, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 250, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 316, + 250, + 553, + 293 + ], + "type": "text", + "content": "[76] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 293, + 553, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 293, + 553, + 348 + ], + "spans": [ + { + "bbox": [ + 316, + 293, + 553, + 348 + ], + "type": "text", + "content": "[77] Fanghua Yu, Jinjin Gu, Zheyuan Li, Jinfan Hu, Xiangtao Kong, Xintao Wang, Jingwen He, Yu Qiao, and Chao Dong. Scaling up to excellence: Practicing model scaling for photo-realistic image restoration in the wild. arXiv preprint arXiv:2401.13627, 2024. 
3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 349, + 553, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 349, + 553, + 425 + ], + "spans": [ + { + "bbox": [ + 316, + 349, + 553, + 425 + ], + "type": "text", + "content": "[78] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Ben Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. Transactions on Machine Learning Research, 2022. 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 426, + 553, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 426, + 553, + 448 + ], + "spans": [ + { + "bbox": [ + 316, + 426, + 553, + 448 + ], + "type": "text", + "content": "[79] Hayoung Yun and Hanjoo Cho. Achievement-based training progress balancing for multi-task learning. In ICCV, 2023. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 449, + 553, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 449, + 553, + 480 + ], + "spans": [ + { + "bbox": [ + 316, + 449, + 553, + 480 + ], + "type": "text", + "content": "[80] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In ICCV, 2023. 3, 4, 5, 10" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 481, + 553, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 481, + 553, + 524 + ], + "spans": [ + { + "bbox": [ + 316, + 481, + 553, + 524 + ], + "type": "text", + "content": "[81] Yuxin Zhang, Nisha Huang, Fan Tang, Haibin Huang, Chongyang Ma, Weiming Dong, and Changsheng Xu. Inversion-based style transfer with diffusion models. In CVPR, 2023. 
2, 3, 11" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 525, + 553, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 525, + 553, + 557 + ], + "spans": [ + { + "bbox": [ + 316, + 525, + 553, + 557 + ], + "type": "text", + "content": "[82] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. What makes good examples for visual in-context learning? In NeurIPS, 2023. 3, 4" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 559, + 553, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 559, + 553, + 602 + ], + "spans": [ + { + "bbox": [ + 316, + 559, + 553, + 602 + ], + "type": "text", + "content": "[83] Canyu Zhao, Mingyu Liu, Huanyi Zheng, Muzhi Zhu, Zhiyue Zhao, Hao Chen, Tong He, and Chunhua Shen. Disception: A generalist diffusion model for visual perceptual tasks. arXiv preprint arXiv:2502.17157, 2025. 4" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 602, + 553, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 602, + 553, + 647 + ], + "spans": [ + { + "bbox": [ + 316, + 602, + 553, + 647 + ], + "type": "text", + "content": "[84] Peng Zheng, Dehong Gao, Deng-Ping Fan, Li Liu, Jorma Laaksonen, Wanli Ouyang, and Nicu Sebe. Bilateral reference for high-resolution dichotomous image segmentation. CAAI Artificial Intelligence Research, 2024. 5" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 647, + 553, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 647, + 553, + 679 + ], + "spans": [ + { + "bbox": [ + 316, + 647, + 553, + 679 + ], + "type": "text", + "content": "[85] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In CVPR, 2017. 
3" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 316, + 680, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 680, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 680, + 553, + 713 + ], + "type": "text", + "content": "[86] Yucheng Zhou, Xiang Li, Qianning Wang, and Jianbing Shen. Visual in-context learning for large vision-language models. arXiv preprint arXiv:2402.11574, 2024. 4" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 194 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 294, + 116 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 294, + 116 + ], + "type": "text", + "content": "[87] Muzhi Zhu, Yang Liu, Zekai Luo, Chenchen Jing, Hao Chen, Guangkai Xu, Xinlong Wang, and Chunhua Shen. Unleashing the potential of the diffusion model in few-shot semantic segmentation. In NeurIPS, 2024. 3, 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 194 + ], + "type": "text", + "content": "[88] Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Xiangyang Zhu, Fu-Yun Wang, Zhanyu Ma, Xu Luo, Zehan Wang, Kaipeng Zhang, Lirui Zhao, Si Liu, Xiangyu Yue, Wanli Ouyang, Yu Qiao, Hongsheng Li, and Peng Gao. 
Lumina next: Making lumina-t2x stronger and faster with next-dit. In NeurIPS, 2024. 2, 3, 4" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 224, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 224, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 224, + 85 + ], + "type": "text", + "content": "Appendix A. Instruction Format" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 296, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 296, + 200 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 296, + 200 + ], + "type": "text", + "content": "In our unified framework, the instruction consists of three parts: (1) layout instruction, which describes the layout of the grid image; (2) task instruction, which specifies the task type; and (3) content instruction, which describes the content of the target image. Fig. 13 illustrates the instructions for concept fusion of style, subject, and layout (Fig. 13 upper) and image editing with reference (Fig. 13 bottom). The content instruction is omitted for some tasks that provide strong visual cues in conditions, like style transfer." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 212, + 288, + 226 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 212, + 288, + 226 + ], + "spans": [ + { + "bbox": [ + 55, + 212, + 288, + 226 + ], + "type": "text", + "content": "Appendix B. 
Fine-tuning FLUX.1-dev Model" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 232, + 296, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 232, + 296, + 483 + ], + "spans": [ + { + "bbox": [ + 55, + 232, + 296, + 483 + ], + "type": "text", + "content": "Apart from FLUX.1-Fill-dev, we also adapt our method to FLUX.1-dev [33], a common text-to-image generative model. Unlike the infilling model that shares a consistent objective with universal image generation, FLUX.1-dev requires customized modifications to process clean condition images and noise target images. Specifically, after concatenating images in a grid layout like the infilling model, we always keep the region corresponding to the conditions as clean latent embeddings throughout the sampling process. This strategy requires modifications in image sampling because FLUX.1-Fill-dev takes noise latent embeddings as input. Moreover, for the adaLN-Zero block [48], it is critical to calculate the separate mean and shift parameters for the regions of clean conditions and noise target by feeding " + }, + { + "bbox": [ + 55, + 232, + 296, + 483 + ], + "type": "inline_equation", + "content": "T = 0" + }, + { + "bbox": [ + 55, + 232, + 296, + 483 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 232, + 296, + 483 + ], + "type": "inline_equation", + "content": "T = t" + }, + { + "bbox": [ + 55, + 232, + 296, + 483 + ], + "type": "text", + "content": " into the adaLN-Zero, respectively. " + }, + { + "bbox": [ + 55, + 232, + 296, + 483 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 232, + 296, + 483 + ], + "type": "text", + "content": " indicates the timestep in each sampling step and gradually increases from 0 to 1 along the sampling process. This strategy aligns with the pre-training domain of FLUX.1-dev, where different noise levels correspond to different mean and shift. As shown in Fig. 
14, this strategy ensures the visual fidelity." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 497, + 224, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 497, + 224, + 510 + ], + "spans": [ + { + "bbox": [ + 55, + 497, + 224, + 510 + ], + "type": "text", + "content": "Appendix C. Evaluation Metrics" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 517, + 198, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 517, + 198, + 529 + ], + "spans": [ + { + "bbox": [ + 55, + 517, + 198, + 529 + ], + "type": "text", + "content": "C.1. Conditioning Generation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 535, + 295, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 535, + 295, + 571 + ], + "spans": [ + { + "bbox": [ + 55, + 535, + 295, + 571 + ], + "type": "text", + "content": "We assess the models from controllability, quality, and text consistency to evaluate image generation quality in conditioning generation and image restoration tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 582, + 295, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 295, + 654 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 295, + 654 + ], + "type": "text", + "content": "Controllability. For conditional image generation, we measure the difference between the input conditions and those extracted from generated images. Specifically, we calculate the F1 Score for the cany-to-image task and RMSE for the depth-to-image task. Additionally, for deblurring, we measure the RMSE between original and restored images." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 666, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 666, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 666, + 296, + 715 + ], + "type": "text", + "content": "Generation quality. 
We measure the Generation quality using FID [23], SSIM, MAN-IQA [75], and MAN-IQA [75]. FID [23] measures the similarity between generated and real image feature distributions. SSIM evalu" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 555, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 145 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 145 + ], + "type": "text", + "content": "ates perceptual quality by comparing luminance, contrast, and structural patterns between images. It calculates local patch statistics and combines them into a composite score ranging from " + }, + { + "bbox": [ + 313, + 72, + 555, + 145 + ], + "type": "inline_equation", + "content": "-1" + }, + { + "bbox": [ + 313, + 72, + 555, + 145 + ], + "type": "text", + "content": " to 1, with higher values indicating better structural preservation. MANIQA [75] and MUSIQ [30] leverage neural networks to predict image quality scores." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 159, + 555, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 159, + 555, + 206 + ], + "spans": [ + { + "bbox": [ + 313, + 159, + 555, + 206 + ], + "type": "text", + "content": "Text consistency. Leveraging the powerful multi-modal capability of CLIP [49], we also measure the semantic alignment between generated images and text prompts, which reflects how the model follows instructions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 213, + 466, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 213, + 466, + 227 + ], + "spans": [ + { + "bbox": [ + 314, + 213, + 466, + 227 + ], + "type": "text", + "content": "C.2. 
Subject Driven Generation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 231, + 555, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 231, + 555, + 328 + ], + "spans": [ + { + "bbox": [ + 313, + 231, + 555, + 328 + ], + "type": "text", + "content": "Following DreamBooth [54] and BLIP-Diffusion [36], we measure DINOv2 [47], CLIP-I [49], and CLIP-T scores for the comparison of subject-driven image generation. DINOv2 [47] and CLIP-I scores measure the alignment between the reference subject and generated images through cosine similarity and CLIP score, respectively. CLIP-T measures the alignment between the generated image and the corresponding text prompt." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 335, + 406, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 335, + 406, + 346 + ], + "spans": [ + { + "bbox": [ + 314, + 335, + 406, + 346 + ], + "type": "text", + "content": "C.3. Style Transfer" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 352, + 556, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 352, + 556, + 484 + ], + "spans": [ + { + "bbox": [ + 313, + 352, + 556, + 484 + ], + "type": "text", + "content": "Following StyleDrop [56], we assess the performance of style transfer according to text consistency and style alignment. For text alignment, we measure the cosine similarity between embeddings of generated images and text prompts, where the embeddings are extracted by CLIP [49]. Regarding style consistency, we measure the cosine similarity between embeddings of generated images and style reference. Note that these two metrics should be considered together because the style consistency will reach 1.0 if the model collapses, where the model completely copies style reference as a composite image and ignores text instructions." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 69, + 388, + 324 + ], + "blocks": [ + { + "bbox": [ + 72, + 69, + 388, + 324 + ], + "lines": [ + { + "bbox": [ + 72, + 69, + 388, + 324 + ], + "spans": [ + { + "bbox": [ + 72, + 69, + 388, + 324 + ], + "type": "image", + "image_path": "ecbf871ff2a97d743b601e08351ab7eb90c6ec2fea0b8095ee48c514aff5062d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 402, + 76, + 481, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 76, + 481, + 87 + ], + "spans": [ + { + "bbox": [ + 402, + 76, + 481, + 87 + ], + "type": "text", + "content": "Layout instruction:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 402, + 97, + 520, + 130 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 97, + 520, + 130 + ], + "spans": [ + { + "bbox": [ + 402, + 97, + 520, + 130 + ], + "type": "text", + "content": "12 images are organized into a grid of 3 rows and 4 columns, evenly spaced." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 402, + 144, + 471, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 144, + 471, + 154 + ], + "spans": [ + { + "bbox": [ + 402, + 144, + 471, + 154 + ], + "type": "text", + "content": "Task instruction:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 402, + 163, + 532, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 163, + 532, + 262 + ], + "spans": [ + { + "bbox": [ + 402, + 163, + 532, + 262 + ], + "type": "text", + "content": "Each row describes a process that begins with [IMAGE1] white edge lines on black from canny detection, [IMAGE2] Photo with a strong artistic theme, [IMAGE3] a reference image showcasing the dominant object and results in [IMAGE4] High-quality visual with distinct artistic touch." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 402, + 268, + 484, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 268, + 484, + 278 + ], + "spans": [ + { + "bbox": [ + 402, + 268, + 484, + 278 + ], + "type": "text", + "content": "Content instruction:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 404, + 286, + 414, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 404, + 286, + 414, + 300 + ], + "spans": [ + { + "bbox": [ + 404, + 286, + 414, + 300 + ], + "type": "text", + "content": "0" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 73, + 331, + 388, + 647 + ], + "blocks": [ + { + "bbox": [ + 73, + 331, + 388, + 647 + ], + "lines": [ + { + "bbox": [ + 73, + 331, + 388, + 647 + ], + "spans": [ + { + "bbox": [ + 73, + 331, + 388, + 647 + ], + "type": "image", + "image_path": "57e0baf0528b693e26676352db38fc6d3da52d3b54bc71b5459c16e30fbeb04e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 649, + 280, + 659 + ], + "lines": [ + { + "bbox": [ + 179, + 649, + 280, + 659 + ], + "spans": [ + { + "bbox": 
[ + 179, + 649, + 280, + 659 + ], + "type": "text", + "content": "(a) Concatenated images" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 661, + 555, + 683 + ], + "lines": [ + { + "bbox": [ + 55, + 661, + 555, + 683 + ], + "spans": [ + { + "bbox": [ + 55, + 661, + 555, + 683 + ], + "type": "text", + "content": "Figure 13. Examples of language instructions that contain prompts about the layout of the concatenated image, task intent, and content of the target image." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 402, + 350, + 481, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 350, + 481, + 361 + ], + "spans": [ + { + "bbox": [ + 402, + 350, + 481, + 361 + ], + "type": "text", + "content": "Layout instruction:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 402, + 373, + 526, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 373, + 526, + 406 + ], + "spans": [ + { + "bbox": [ + 402, + 373, + 526, + 406 + ], + "type": "text", + "content": "A 3x3 grid containing 9 images, aligned in a clean and structured layout" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 402, + 422, + 471, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 422, + 471, + 432 + ], + "spans": [ + { + "bbox": [ + 402, + 422, + 471, + 432 + ], + "type": "text", + "content": "Task instruction:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 402, + 442, + 532, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 442, + 532, + 510 + ], + "spans": [ + { + "bbox": [ + 402, + 442, + 532, + 510 + ], + "type": "text", + "content": "Every row provides a step-by-step guide to evolve [IMAGE1] a reference image with the main subject included, [IMAGE2] an image with flawless clarity into [IMAGE3] a high-quality image." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 402, + 522, + 484, + 532 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 522, + 484, + 532 + ], + "spans": [ + { + "bbox": [ + 402, + 522, + 484, + 532 + ], + "type": "text", + "content": "Content instruction:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 402, + 544, + 532, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 544, + 532, + 622 + ], + "spans": [ + { + "bbox": [ + 402, + 544, + 532, + 622 + ], + "type": "text", + "content": "The bottom-right corner image presents: A glossy gel nail polish bottle. At the edge of a bustling city park, this item rests on vibrant green grass, captured with a subtle bokeh effect as joggers and pets move in the background." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 417, + 648, + 521, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 417, + 648, + 521, + 659 + ], + "spans": [ + { + "bbox": [ + 417, + 648, + 521, + 659 + ], + "type": "text", + "content": "(b) Language instructions" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 308, + 180, + 385 + ], + "blocks": [ + { + "bbox": [ + 96, + 295, + 138, + 305 + ], + "lines": [ + { + "bbox": [ + 96, + 295, + 138, + 305 + ], + "spans": [ + { + "bbox": [ + 96, + 295, + 138, + 305 + ], + "type": "text", + "content": "Condition" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 57, + 308, + 180, + 385 + ], + "lines": [ + { + "bbox": [ + 57, + 308, + 180, + 385 + ], + 
"spans": [ + { + "bbox": [ + 57, + 308, + 180, + 385 + ], + "type": "image", + "image_path": "ec06ee6695d88b0d46b5f25e4cca7be24315691f1a9ddf2b5a2a9ac4308e52f4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 181, + 308, + 303, + 385 + ], + "blocks": [ + { + "bbox": [ + 225, + 295, + 252, + 306 + ], + "lines": [ + { + "bbox": [ + 225, + 295, + 252, + 306 + ], + "spans": [ + { + "bbox": [ + 225, + 295, + 252, + 306 + ], + "type": "text", + "content": "Target" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 181, + 308, + 303, + 385 + ], + "lines": [ + { + "bbox": [ + 181, + 308, + 303, + 385 + ], + "spans": [ + { + "bbox": [ + 181, + 308, + 303, + 385 + ], + "type": "image", + "image_path": "0e85569193972fbb96aa49172e0269e931543b886ef36bfc2dece96a9861cc93.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 308, + 308, + 430, + 385 + ], + "blocks": [ + { + "bbox": [ + 347, + 295, + 388, + 305 + ], + "lines": [ + { + "bbox": [ + 347, + 295, + 388, + 305 + ], + "spans": [ + { + "bbox": [ + 347, + 295, + 388, + 305 + ], + "type": "text", + "content": "Condition" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 308, + 308, + 430, + 385 + ], + "lines": [ + { + "bbox": [ + 308, + 308, + 430, + 385 + ], + "spans": [ + { + "bbox": [ + 308, + 308, + 430, + 385 + ], + "type": "image", + "image_path": "8f28dcad99cf5e02fe353236076009c397e7bdd7f66c8b64768a20abe40ff522.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 430, + 308, + 553, + 385 + ], + "blocks": [ + { + "bbox": [ + 478, + 295, + 504, + 305 + ], + "lines": [ + { + "bbox": [ + 478, + 295, + 504, + 305 + ], + "spans": [ + { + "bbox": [ + 478, + 295, + 504, + 305 + ], + "type": 
"text", + "content": "Target" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 430, + 308, + 553, + 385 + ], + "lines": [ + { + "bbox": [ + 430, + 308, + 553, + 385 + ], + "spans": [ + { + "bbox": [ + 430, + 308, + 553, + 385 + ], + "type": "image", + "image_path": "fec04428066c5f332887357520d323ebdf63d8418f8810016904644ec989f7e1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 58, + 392, + 176, + 460 + ], + "blocks": [ + { + "bbox": [ + 58, + 392, + 176, + 460 + ], + "lines": [ + { + "bbox": [ + 58, + 392, + 176, + 460 + ], + "spans": [ + { + "bbox": [ + 58, + 392, + 176, + 460 + ], + "type": "image", + "image_path": "730cf23dcb3263363f4822c37e1a3b3bc81798efc1e9fd59a950a41fc35132f8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 469, + 235, + 479 + ], + "lines": [ + { + "bbox": [ + 126, + 469, + 235, + 479 + ], + "spans": [ + { + "bbox": [ + 126, + 469, + 235, + 479 + ], + "type": "text", + "content": "(a) separate mean and shift" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 179, + 392, + 303, + 460 + ], + "blocks": [ + { + "bbox": [ + 179, + 392, + 303, + 460 + ], + "lines": [ + { + "bbox": [ + 179, + 392, + 303, + 460 + ], + "spans": [ + { + "bbox": [ + 179, + 392, + 303, + 460 + ], + "type": "image", + "image_path": "d354e8d0d32131643b1cced4782295a381b1ae6609006c1e868af1fd71828515.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 171, + 487, + 437, + 498 + ], + "lines": [ + { + "bbox": [ + 171, + 487, + 437, + 498 + ], + "spans": [ + { + "bbox": [ + 171, + 487, + 437, + 498 + ], + "type": "text", + "content": "Figure 14. Effects of separate mean and shift in fine-tuning FLUX.1-dev." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 308, + 392, + 429, + 460 + ], + "blocks": [ + { + "bbox": [ + 308, + 392, + 429, + 460 + ], + "lines": [ + { + "bbox": [ + 308, + 392, + 429, + 460 + ], + "spans": [ + { + "bbox": [ + 308, + 392, + 429, + 460 + ], + "type": "image", + "image_path": "b956840f0c8e329182c2355da90915a90cc86e1132b6cc30aead6bf424475919.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 469, + 484, + 479 + ], + "lines": [ + { + "bbox": [ + 378, + 469, + 484, + 479 + ], + "spans": [ + { + "bbox": [ + 378, + 469, + 484, + 479 + ], + "type": "text", + "content": "(b) unified mean and shift" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 430, + 392, + 553, + 460 + ], + "blocks": [ + { + "bbox": [ + 430, + 392, + 553, + 460 + ], + "lines": [ + { + "bbox": [ + 430, + 392, + 553, + 460 + ], + "spans": [ + { + "bbox": [ + 430, + 392, + 553, + 460 + ], + "type": "image", + "image_path": "8748534db01d157266d73470cc0bf47cd9889217f45f4307fea447a43120e8f8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_content_list.json b/data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_content_list.json new file mode 100644 index 
0000000000000000000000000000000000000000..543c3bbd8563d9cbb74508dca343a73b537514ec --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_content_list.json @@ -0,0 +1,2134 @@ +[ + { + "type": "text", + "text": "Geo4D: Leveraging Video Generators for Geometric 4D Scene Reconstruction", + "text_level": 1, + "bbox": [ + 104, + 130, + 890, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zeren Jiang1 Chuanxia Zheng1 Iro Laina1 Diane Larlus2 Andrea Vedaldi1 \n1Visual Geometry Group, University of Oxford 2Naver Labs Europe", + "bbox": [ + 166, + 179, + 826, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zeren, cxzheng, iro, vedaldi}@robots.ox.ac.uk diane.larlus@naverlabs.com", + "bbox": [ + 169, + 219, + 828, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "geo4d.github.io", + "bbox": [ + 428, + 239, + 565, + 252 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0e871412a73fb1a4e0d250027f7de496bdefed521e06be3fdba1bc8a057c63dd.jpg", + "image_caption": [ + "Figure 1. Geo4D repurposes a video diffusion model [102] for monocular 4D reconstruction. It uses only synthetic data for training, yet generalizes well to out-of-domain real videos. It predicts several geometric modalities, including point maps, disparity maps, and ray maps, fusing and aligning them to obtain state-of-the-art dynamic reconstruction even for scenes with extreme object and camera motion." + ], + "image_footnote": [], + "bbox": [ + 94, + 290, + 903, + 494 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 551, + 326, + 566 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We introduce Geo4D, a method to repurpose video diffusion models for monocular 3D reconstruction of dynamic scenes. 
By leveraging the strong dynamic priors captured by large-scale pre-trained video models, Geo4D can be trained using only synthetic data while generalizing well to real data in a zero-shot manner. Geo4D predicts several complementary geometric modalities, namely point, disparity, and ray maps. We propose a new multi-modal alignment algorithm to align and fuse these modalities, as well as a sliding window approach at inference time, thus enabling robust and accurate 4D reconstruction of long videos. Extensive experiments across multiple benchmarks show that Geo4D significantly surpasses state-of-the-art video depth estimation methods.", + "bbox": [ + 88, + 584, + 482, + 794 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 89, + 828, + 220, + 843 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We consider the problem of feed-forward 4D reconstruction, which involves learning a neural network to reconstruct the 3D geometry of a dynamic scene from a monoc", + "bbox": [ + 89, + 854, + 482, + 900 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ular video. This task is particularly challenging for videos captured in uncontrolled settings, such as those shot with handheld cameras or downloaded from the Internet. However, a robust solution to this problem would have a tremendous impact on a wide range of applications, from video understanding to computer graphics and robotics.", + "bbox": [ + 511, + 551, + 903, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4D reconstruction from videos is related to multi-view static 3D reconstruction, which is typically addressed using methods from visual geometry like bundle adjustment. Recent neural networks [89, 92] have emerged as powerful tools that can replace, or at least complement, bundle adjustment. 
They excel especially in difficult reconstruction scenarios, involving, e.g., textureless surfaces and occlusions, thanks to the priors they learn from data. Given the additional challenges involved in 4D reconstruction, we expect that such priors would benefit this task even more.", + "bbox": [ + 511, + 643, + 905, + 794 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In fact, powerful networks like DUSt3R [92], designed for static multi-view 3D reconstruction, have recently been extended to the dynamic case, for example by MonST3R [113]. However, these models are heavily engineered to solve specific 3D reconstruction problems. Most importantly, they require significant amounts of training data with 3D annotations for supervision. Such data", + "bbox": [ + 511, + 795, + 905, + 900 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07961v2 [cs.CV] 19 Aug 2025", + "bbox": [ + 22, + 273, + 60, + 724 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "is difficult to collect for dynamic scenes, especially in real life. This suggests using 4D synthetic training data instead. However, this data is difficult to obtain at scale, and the gap with the real world can compromise generalization.", + "bbox": [ + 89, + 90, + 480, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "One way to mitigate this problem is to pre-train the model on tasks related to 3D reconstruction for which real data is easily available. For example, DUSt3R [92] and derived methods [113] use image matching for pretraining [98]. Here, we suggest starting instead from an off-the-shelf video generator. Video generators are powerful models, often considered proxies of world simulators [37, 54, 59]. 
More importantly for us, the videos they generate demonstrate an understanding of effects like camera motion and perspective, as well as typical object motion in the context of a scene. However, they only generate pixels, leaving any 3D or 4D understanding implicit and thus not directly actionable.", + "bbox": [ + 88, + 154, + 482, + 349 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we show that a pre-trained off-the-shelf video generator can be turned into an effective monocular feed-forward 4D reconstructor. To this end, we introduce Geo4D, a novel approach for adapting Video Generators for Geometric 4D Reconstruction. With Geo4D, we demonstrate that these generic video architectures can successfully solve complex 4D reconstruction tasks, which is a step towards future video foundation models that natively integrate 4D geometry. Prior work such as Marigold [28] and concurrent work DepthCrafter [22] have looked at adapting, respectively, image and video generators for depth estimation. Here, we go one step further and consider the full recovery of 4D geometry, including camera motion and dynamic 3D structure.", + "bbox": [ + 88, + 351, + 482, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "With Geo4D, our goal is to make 4D geometry explicit in the video generator. This in turn requires us to choose an explicit representation of 4D information. We follow DUSt3R and adopt its viewpoint-invariant point maps. Namely, we associate each pixel in each frame with the coordinate of the corresponding 3D point, expressed relative to the first frame in the video, used as a reference. Hence, the static parts of the point clouds extracted from the different frames line up, and the dynamic parts form a 3D 'trace' of the motion of the dynamic objects, as shown in Fig. 
1.", + "bbox": [ + 88, + 564, + 482, + 715 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Viewpoint-invariant point maps are a powerful representation because they implicitly encode the camera motion and intrinsics and can be easily predicted by a neural network [92]. However, they are not necessarily the best representation for all parts of the scene, particularly for points far away from the observer or even at infinity, such as the sky. We thus consider two more modalities with better dynamic range, namely disparity maps and camera ray maps. Ray maps, in particular, are defined for all image pixels regardless of the scene geometry.", + "bbox": [ + 89, + 718, + 482, + 867 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our model thus predicts three modalities: point, disparity, and ray maps. These modalities are redundant in prin", + "bbox": [ + 89, + 869, + 482, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ciple, but complementary in practice. At test time, we reconcile them via a fast, global optimization step and show that this leads to significantly more robust 4D reconstructions. Due to depth and ray map prediction, we show very strong empirical results on video depth estimation and in the recovery of the camera orientation.", + "bbox": [ + 511, + 90, + 903, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "One of the challenges of monocular 4D reconstruction is that it is ambiguous, significantly more so than static 3D reconstruction. However, the stochastic nature of the video generator can help deal with this ambiguity. We also introduce uncertainty maps in the encoder-decoder architecture that processes the geometric maps, and integrate them into the multi-modal alignment process.", + "bbox": [ + 511, + 181, + 903, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Overall, our contributions are as follows. 
Recently, DUSt3R [92] introduced a point map representation for scene-level 3D reconstruction, followed by [35, 86, 89, 104].
Classical techniques often rely on RGB-D sensors [24, 53], but such steps are impractical for many real-world scenes. Recently, with advancements in neural representations [52, 56], NeRF-based approaches [27, 38, 39, 57, 58, 62] have shown impressive results. However, volume rendering in NeRF is computationally expensive. Convergence and rendering speed can be improved by using 3D-GS representations [12, 29, 34, 43, 91, 99, 107, 111], which reduce but do not eliminate the cost of iterative optimization. Very", + "bbox": [ + 511, + 719, + 903, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d4dfa354b18df642a9482367bea792839ff1939d45e9f0c86fd9d7e7655772fc.jpg", + "image_caption": [ + "Figure 2. Overview of Geo4D. During training, video conditions are injected by locally concatenating the latent feature of the video with diffused geometric features $\\mathbf{z}_t^{\\mathrm{X}},\\mathbf{z}_t^{\\mathrm{D}},\\mathbf{z}_t^{\\mathrm{r}}$ and are injected globally via cross-attention in the denoising U-Net, after CLIP encoding and a query transformer. The U-Net is fine-tuned via Eq. 2. During inference, iteratively denoised latent features $\\hat{\\mathbf{z}}_0^{\\mathrm{X}},\\hat{\\mathbf{z}}_0^{\\mathrm{D}},\\hat{\\mathbf{z}}_0^{\\mathrm{r}}$ are decoded by the fine-tuned VAE decoder, followed by multi-modal alignment optimization for coherent 4D reconstruction." + ], + "image_footnote": [], + "bbox": [ + 98, + 90, + 903, + 284 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "recently, MegaSaM [40] achieved highly accurate and robust camera pose estimation and reconstruction for dynamic videos, but it requires accurate monocular depth priors. Similarly, Uni4D [108] produces accurate 4D reconstructions by leveraging various visual foundation models and performing multi-stage bundle adjustment. 
For objects, L4GM [66] and Animate3D [26] first generate multi-view videos from a monocular video input, and subsequently apply 3D-GS [29] to reconstruct a temporally consistent 4D model.
Beyond single", + "bbox": [ + 89, + 734, + 483, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "object reconstruction, several recent efforts have extended pre-trained diffusion models to tackle scene-level 3D tasks, such as optical flow estimation [69], view synthesis [10, 15, 44, 68, 81, 109], depth estimation [13, 28, 117], and normal estimation [14, 33, 63]. More related to our approach, Matrix3D [49] jointly predicts depth and camera parameters, and WVD [115] introduces a hybrid RGB+point map representation for scene reconstruction. However, these approaches assume static 3D environments, whereas we address dynamic 4D scene reconstruction, which is a much harder problem due to object motion across time.", + "bbox": [ + 511, + 356, + 906, + 522 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "More closely related to our approach, concurrent GeometryCrafter [103] introduced a point map VAE with a dual encoder-decoder architecture to improve reconstruction accuracy. However, their point maps are defined in individual camera coordinates, necessitating the use of additional segmentation [30] and tracking models [101] to recover the global point map and estimate camera poses. Aether [82], on the other hand, outputs depth maps and ray maps from a video diffusion model for 4D reconstruction. In contrast, our experiments demonstrate that performance can be significantly enhanced by jointly predicting multiple geometric modalities that capture diverse dynamic ranges, ensuring better temporal coherence and robustness. Importantly, our approach is self-contained and does not rely on external models, enhancing its generality and reliability.", + "bbox": [ + 511, + 523, + 908, + 751 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 
Method", + "text_level": 1, + "bbox": [ + 513, + 768, + 604, + 782 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our goal is to learn a neural network $f_{\\theta}$ that can reconstruct dynamic 3D scenes from monocular videos. Given as input a monocular video $\\mathcal{I} = \\{I^i\\}_{i=1}^N$ consisting of $N$ frames, where each frame is an RGB image $I^i \\in \\mathbb{R}^{H \\times W \\times 3}$ , the network $f_{\\theta}$ returns a representation of its 4D geometry:", + "bbox": [ + 511, + 795, + 906, + 871 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nf _ {\\boldsymbol {\\theta}}: \\left\\{\\boldsymbol {I} ^ {i} \\right\\} _ {i = 1} ^ {N} \\mapsto \\left\\{\\left(\\boldsymbol {D} ^ {i}, \\boldsymbol {X} ^ {i}, \\boldsymbol {r} ^ {i}\\right) \\right\\} _ {i = 1} ^ {N}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 883, + 906, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The network computes the disparity map $D^{i}\\in \\mathbb{R}^{H\\times W\\times 1}$ the viewpoint-invariant point map $X^{i}\\in \\mathbb{R}^{H\\times W\\times 3}$ , and the ray map $\\pmb {r}^i\\in \\mathbb{R}^{H\\times W\\times 6}$ for each frame $I^i$ $i = 1,\\dots ,N$ . As we discuss in Sec. 3.2, these quantities collectively represent the 4D geometry of a scene, including its dynamic structure and time-varying camera extrinsic and intrinsic parameters. No camera parameters are provided as input; these are implicitly estimated by the model as well.", + "bbox": [ + 89, + 89, + 480, + 210 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We implement $f_{\\theta}$ as a video diffusion model, where $\\theta$ are the learnable parameters. We discuss the relevant background on video diffusion models in Sec. 3.1. Then, in Sec. 
where $\epsilon \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ is Gaussian noise, and $\bar{\alpha}_t$ is the noise level at step $t$ of $T$ noising steps.
The denoising network $\epsilon_{\theta}$ is then trained to reverse this noising process by optimizing the following objective:
Because the reference frame is fixed and independent of the time-varying viewpoint, we call these point maps viewpoint-invariant. The", + "bbox": [ + 89, + 763, + 482, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "advantages of this representation are convincingly demonstrated by DUSt3R [92]. For a static scene, or by knowing which image pixels correspond to the static part of a scene, knowledge of the point maps allows recovery of the intrinsic and extrinsic camera parameters as well as the scene depth. This is done by solving an optimization problem that aligns the dynamic point maps with a pinhole camera model.", + "bbox": [ + 511, + 90, + 903, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As noted in Sec. 1, while point maps $\\{\\pmb{X}^i\\}_{i=1}^N$ fully encode the 4D geometry of the scene, they are not effective for all parts of the scene. Their dynamic range is limited, and they are not even defined for points at infinity (e.g. sky). Hence, we consider two additional modalities: disparity maps $\\{\\pmb{D}^i\\}_{i=1}^N$ and camera ray maps $\\{\\pmb{r}^i\\}_{i=1}^N$ , also encouraged by prior evidence [14, 33, 49] that diffusion models can benefit from learning to predict multiple quantities. Disparity maps are not viewpoint-invariant, but have a better dynamic range than point maps (the disparity is zero for points at infinity). Ray maps represent only the camera parameters and are defined for all image pixels, independent of the scene geometry. For the disparity map, $D_{uv}^i$ is the disparity (inverse depth) of the scene point that lands at pixel $(u,v)$ , as seen in frame $I^i$ . 
For the ray map, we adopt Plücker coordinates [75, 97, 118], i.e., $\\pmb{r}_{uv} = (\\pmb{d}_{uv}, \\pmb{m}_{uv})$ , where $\\pmb{d}_{uv} = \\mathbf{R}^\\top \\mathbf{K}^{-1}(u,v,1)^\\top$ is the ray direction, and $\\pmb{m}_{uv} = -\\mathbf{R}^\\top \\mathbf{t} \\times \\pmb{d}_{uv}$ , where $(\\mathbf{R}, \\mathbf{K}, \\mathbf{t})$ are the camera's rotation, calibration, and translation parameters.", + "bbox": [ + 511, + 196, + 906, + 484 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Multi-modal latent encoding. The three modalities come in the form of images and can thus be naturally predicted by the video diffusion architecture. However, this requires first mapping them to the latent space, for which we need suitable versions of the encoder $\\mathcal{E}$ and decoder $\\mathcal{D}$ from Sec. 3.1. Related prior work [14, 28] for depth prediction simply repurposes a pre-trained image encoder-decoder without modification. We found this to work well for disparity and ray maps, but not for point maps. Hence, for the point maps only, we fine-tune the pre-trained decoder $\\mathcal{D}$ using the following objective function [100]:", + "bbox": [ + 511, + 487, + 906, + 654 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = - \\sum_ {u v} \\ln \\frac {1}{\\sqrt {2} \\sigma_ {u v}} \\exp - \\frac {\\sqrt {2} \\ell_ {1} (\\mathcal {D} (\\mathcal {E} (\\boldsymbol {X})) _ {u v} , \\boldsymbol {X} _ {u v})}{\\sigma_ {u v}}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 665, + 903, + 714 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\sigma \\in \\mathbb{R}^{H\\times W}$ is the uncertainty of the reconstructed point map, which is also predicted by an additional branch of our VAE decoder. 
We leave the encoder $\\mathcal{E}$ unchanged to modify the latent space as little as possible; instead, we normalize the point maps to the range $[-1,1]$ to make them more compatible with the pre-trained image encoder.", + "bbox": [ + 511, + 715, + 903, + 806 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Video conditioning. The original video diffusion model is conditioned on a single image, but here we need to condition it on the entire input video $\\mathcal{I} = \\{I^i\\}_{i=1}^N$ . To this end, we use a hybrid conditioning mechanism with two streams.", + "bbox": [ + 511, + 809, + 903, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Fig. 2, in one stream, we extract a global representation of each frame $\\pmb{I}^i$ by passing it to", + "bbox": [ + 511, + 869, + 903, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "CLIP [64] followed by a lightweight learnable query transformer [1]. These vectors are incorporated in the transformer via cross-attention layers injected in each U-Net block. In the other stream, we extract local spatial features from the VAE encoder and concatenate them channel-wise to the noised latents, encoding the generated 4D modalities $\\{(D^i,X^i,r^i)\\}_{i = 1}^N$ .", + "bbox": [ + 89, + 90, + 483, + 198 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Multi-Modal Alignment", + "text_level": 1, + "bbox": [ + 89, + 207, + 312, + 223 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As noted, Geo4D predicts several non-independent geometric modalities. Furthermore, processing all frames of a long monocular video simultaneously with a video diffusion model is computationally prohibitive. Therefore, during inference, we use a temporal sliding window that segments the video into multiple overlapping clips, with partial overlap to facilitate joining them. 
The goal of this section is to fuse the resulting multi-modal and multi-window data into a single, coherent reconstruction of the entire video.", + "bbox": [ + 89, + 229, + 483, + 364 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Temporal sliding window. Given a video $\\mathcal{I} = \\{\\pmb{I}^i\\}_{i=1}^N$ with $N$ frames, we divide it into several video clips $\\mathcal{G} = \\{g^k\\}$ , $k \\in S$ , where each clip $g^k$ contains $V$ frames $\\{I^i\\}_{i=k}^{k+V-1}$ , and the set of starting indices is $\\mathcal{S} = \\{0, s, 2s, \\ldots, \\left\\lfloor \\frac{N-V}{s} \\right\\rfloor s\\} \\cup \\{N-V\\}$ . Here, $s$ is the sliding window stride. The final term $\\{N-V\\}$ ensures that the last clip always includes the final frames of the video.", + "bbox": [ + 89, + 368, + 483, + 474 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Alignment objectives. First, given the predicted point maps $X^{i,g}$ for each frame $i$ in each video clip $g \\in \\mathcal{G}$ , we derive corresponding globally aligned point maps in world coordinates, as well as the relative camera motion and scale parameters. We denote these quantities with the p subscript to emphasize that they are inferred from the point map predictions. 
To do so, we extend the pairwise global alignment loss from DUSt3R to a group-wise one:", + "bbox": [ + 89, + 479, + 483, + 599 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {p}} \\left(\\boldsymbol {X}, \\lambda_ {\\mathrm {p}} ^ {g}, \\boldsymbol {P} _ {\\mathrm {p}} ^ {g}\\right) = \\sum_ {g \\in \\mathcal {G}} \\sum_ {i \\in g} \\sum_ {u v} \\left\\| \\frac {\\boldsymbol {X} _ {u v} ^ {i} - \\lambda_ {\\mathrm {p}} ^ {g} \\boldsymbol {P} _ {\\mathrm {p}} ^ {g} \\boldsymbol {X} _ {u v} ^ {i , g}}{\\boldsymbol {\\sigma} _ {u v} ^ {i , g}} \\right\\| _ {1}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 609, + 480, + 662 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda_{\\mathrm{p}}^{g}$ and $P_{\\mathrm{p}}^{g} = [\\mathbf{R}_{\\mathrm{p}}^{g}|\\beta_{\\mathrm{p}}^{g}]$ denote the group-wise scale and transformation matrix that align the group-relative point maps $X^{i,g}$ to the point maps $X^i$ expressed in the global reference frame. $\\sigma_{uv}^{i,g}$ denotes the uncertainty of the point map for frame $i$ in group $g$ at pixel $(u,v)$ . We further parameterize each of these point maps as $X_{uv}^{i} = \\mathbf{R}_{\\mathrm{p}}^{i^{\\top}}\\mathbf{K}_{\\mathrm{p}}^{i^{-1}}D_{\\mathrm{p},uv}^{i^{-1}}(u,v,1) + o_{\\mathrm{p}}^{i}$ in terms of each camera's calibration $\\mathbf{K}_{\\mathrm{p}}^{i}$ , world-to-camera rotation $\\mathbf{R}_{\\mathrm{p}}^{i}$ , and center $o_{\\mathrm{p}}^{i}$ expressed in the global reference frame, and the disparity map $D_{\\mathrm{p}}^{i}$ . 
Substituting this expression into the loss function (4) and minimizing it, we can thus recover $\\mathbf{K}_{\\mathrm{p}}^{i},\\mathbf{R}_{\\mathrm{p}}^{i},o_{\\mathrm{p}}^{i}, D_{\\mathrm{p}}^{i},\\lambda_{\\mathrm{p}}^{g},P_{\\mathrm{p}}^{g}$ from the predicted point maps.", + "bbox": [ + 89, + 662, + 483, + 851 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The steps above infer the disparity maps $D_{\\mathrm{p}}^{i}$ from the point maps, but the model also predicts disparity maps $D_{\\mathrm{d}}^{i}$ directly, where the d subscript denotes disparity prediction.", + "bbox": [ + 89, + 853, + 483, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We introduce the following loss to align them:", + "bbox": [ + 513, + 90, + 821, + 106 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {d}} \\left(\\boldsymbol {D} _ {\\mathrm {p}}, \\lambda_ {\\mathrm {d}} ^ {g}, \\beta_ {\\mathrm {d}} ^ {g}\\right) = \\sum_ {g \\in \\mathcal {G}} \\sum_ {i \\in g} \\left\\| \\boldsymbol {D} _ {\\mathrm {p}} ^ {i} - \\lambda_ {\\mathrm {d}} ^ {g} \\boldsymbol {D} _ {d} ^ {i, g} - \\beta_ {\\mathrm {d}} ^ {g} \\right\\| _ {1}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 113, + 906, + 148 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda_{\\mathrm{d}}^{g}$ and $\\beta_{\\mathrm{d}}^{g}$ are optimized scale and shift parameters.", + "bbox": [ + 511, + 156, + 898, + 172 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, the ray maps $\\pmb{r}$ also encode camera pose. To align them with the global camera parameters $(\\mathbf{R}_{\\mathrm{p}},\\mathbf{K}_{\\mathrm{p}},\\boldsymbol{o}_{\\mathrm{p}})$ obtained from the point map, we first solve an optimization problem to extract the camera parameters from the ray map $\\pmb{r}^{i,g} = \\langle \\pmb{d}^{i,g},\\pmb{m}^{i,g}\\rangle$ for each group $g$ at frame $i$ . 
Following Ray Diffusion [114], the camera center $\\pmb{o}_{\\mathrm{c}}^{i,g}$ is solved by finding the 3D world coordinate closest to the intersection of all rays:", + "bbox": [ + 511, + 172, + 906, + 292 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {o} _ {\\mathrm {c}} ^ {i, g} = \\arg \\min _ {\\boldsymbol {p} \\in \\mathbb {R} ^ {3}} \\sum_ {u \\in H, v \\in W} \\| \\boldsymbol {p} \\times \\boldsymbol {d} _ {u v} ^ {i, g} - \\boldsymbol {m} _ {u v} ^ {i, g} \\| ^ {2}. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 300, + 906, + 332 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The camera extrinsics are solved by optimizing for the matrix $\\mathbf{H}$ that transforms the predicted per-pixel ray directions $d_{uv}^{i,g}$ to the ray directions $\\mathbf{u}_{uv}$ of a canonical camera:", + "bbox": [ + 511, + 340, + 905, + 386 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {H} ^ {i, g} = \\underset {\\| \\mathbf {H} \\| = 1} {\\arg \\min } \\sum_ {u \\in H, v \\in W} \\left\\| \\mathbf {H} d _ {u v} ^ {i, g} \\times \\mathbf {u} _ {u v} \\right\\|. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 393, + 906, + 426 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Then the world-to-camera rotation matrix $\\mathbf{R}_c^{i,g}$ and intrinsic matrix $\\mathbf{K}_c^{i,g}$ can be solved using the RQ-decomposition of $\\mathbf{H}^{i,g}$ . 
Finally, the camera trajectory alignment loss is:", + "bbox": [ + 511, + 436, + 906, + 482 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {c}} \\left(\\mathbf {R} _ {\\mathrm {p}}, \\boldsymbol {o} _ {\\mathrm {p}}, \\mathbf {R} _ {\\mathrm {c}} ^ {g}, \\beta_ {\\mathrm {c}} ^ {g}, \\lambda_ {\\mathrm {c}} ^ {g}\\right) = \\sum_ {g \\in \\mathcal {G}} \\sum_ {i \\in g} \\left(\\left\\| \\mathbf {R} _ {\\mathrm {p}} ^ {i ^ {\\top}} \\mathbf {R} _ {\\mathrm {c}} ^ {g} \\mathbf {R} _ {\\mathrm {c}} ^ {i, g} - \\boldsymbol {I} \\right\\| _ {\\mathrm {f}} \\right. \\\\ \\left. + \\left\\| \\lambda_ {\\mathrm {c}} ^ {g} \\boldsymbol {o} _ {\\mathrm {c}} ^ {i, g} + \\beta_ {\\mathrm {c}} ^ {g} - \\boldsymbol {o} _ {\\mathrm {p}} ^ {i} \\right\\| _ {2}\\right), \\tag {8} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 488, + 903, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $R_{\\mathrm{c}}^{g}, \\beta_{\\mathrm{c}}^{g}, \\lambda_{\\mathrm{c}}^{g}$ are learnable group-wise rotation matrix, translation vector, and scale, respectively, to align the global camera trajectory $(\\mathbf{R}_p, \\mathbf{o}_p)$ and the predicted ones $(\\mathbf{R}_c, \\mathbf{o}_c)$ . Following MonST3R [113], we also use a loss to smooth the camera trajectory:", + "bbox": [ + 511, + 575, + 905, + 652 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {s}} \\left(\\mathbf {R} _ {\\mathrm {p}}, \\boldsymbol {o} _ {p}\\right) = \\sum_ {i = 1} ^ {N} \\left(\\left\\| \\mathbf {R} _ {\\mathrm {p}} ^ {i ^ {\\top}} \\mathbf {R} _ {\\mathrm {p}} ^ {i + 1} - \\boldsymbol {I} \\right\\| _ {\\mathrm {f}} + \\left\\| \\boldsymbol {o} _ {\\mathrm {p}} ^ {i + 1} - \\boldsymbol {o} _ {\\mathrm {p}} ^ {i} \\right\\| _ {2}\\right). 
\\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 516, + 659, + 903, + 712 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The final optimization objective is the weighted combination of the losses above:", + "bbox": [ + 511, + 713, + 905, + 742 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {a l l}} = \\alpha_ {1} \\mathcal {L} _ {\\mathrm {p}} + \\alpha_ {2} \\mathcal {L} _ {\\mathrm {d}} + \\alpha_ {3} \\mathcal {L} _ {\\mathrm {c}} + \\alpha_ {4} \\mathcal {L} _ {\\mathrm {s}}. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 752, + 903, + 768 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A note on the invariants. The model predicts point maps, disparity maps, and ray map origins up to scale, as this cannot be uniquely determined from a monocular video. The disparity map is also recovered up to a translation, which discounts the focal length (this is sometimes difficult to estimate due to the dolly zoom effect). Likewise, the ray map origin is recovered up to a shift, necessary to allow normalizing these maps.", + "bbox": [ + 511, + 779, + 906, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/c076f88109dab7c9696980c607a9ee0e70c12cc68a7a1debc504756168c7c010.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryMethodSintel [5]Bonn [55]KITTI [17]
Abs Rel ↓δ < 1.25 ↑Abs Rel ↓δ < 1.25 ↑Abs Rel ↓δ < 1.25 ↑
Single-frame depthMarigold [28]0.53251.50.09193.10.14979.6
Depth-Anything-V2 [106]0.36755.40.10692.10.14080.4
Video depthNVDS [95]0.40848.30.16776.60.25358.8
ChronoDepth [70]0.68748.60.10091.10.16775.9
DepthCrafter* [22]0.27069.70.07197.20.10489.6
Video depth & Camera poseRobust-CVD [32]0.70347.8
CasualSAM [116]0.38754.70.16973.70.24662.2
MonST3R [113]0.33558.50.06396.40.10489.5
Geo4D (Ours)0.20573.50.05997.20.08693.7
", + "bbox": [ + 106, + 88, + 890, + 275 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Video depth estimation on Sintel [5], Bonn [55] and KITTI [17] datasets. We follow the evaluation protocols established in recent MonST3R [113] for a fair comparison. Notably, results for DepthCrafter* are reported from its latest version (v1.0.1). The Best and the second best results are highlighted.", + "bbox": [ + 89, + 276, + 906, + 318 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 89, + 325, + 225, + 343 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 89, + 349, + 299, + 368 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training datasets. Geo4D is trained exclusively on synthetic datasets, yet demonstrates strong generalization to real-world videos. Specifically, we use five synthetic datasets for training: Spring [50], BEDLAM [2], PointOdyssey [119], TarTanAir [93], and VirtualKitti [6]. See the Supp. Mat Tab. 5 for details.", + "bbox": [ + 89, + 373, + 483, + 465 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training. Our Geo4D is initialized with the weights of DynamiCrafter [102] and trained using AdamW [48] with a learning rate of $1 \\times 10^{-5}$ and a batch size of 32. We use a progressive training strategy to improve convergence and stability. First, we train the model to generate a single geometric modality, i.e., the point maps, at a fixed resolution of $512 \\times 320$ . Next, we introduce a multi-resolution training scheme to improve generalization and robustness, which includes various resolutions: $512 \\times 384$ , $512 \\times 320$ , $576 \\times 256$ , $640 \\times 192$ . Finally, we progressively add additional geometric modalities, i.e., the ray and depth maps. 
Training is conducted on 4 NVIDIA H100 GPUs with a total training time of approximately one week.", + "bbox": [ + 89, + 468, + 483, + 665 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Inference. As described in Sec. 3.2, given an $N$ -frame video as input, we first split it into overlapping clips $\\mathcal{G}$ , each containing $V = 16$ frames, with a stride of $s = 4$ . Each video clip is encoded and fed to the diffusion model to sample multi-modal 4D parameters $(X^{i,g}, D^{i,g}, r^{i,g})$ for the video. For sampling, we use DDIM [77] with 5 steps. Finally, the alignment algorithm in Sec. 3.2 is used to fuse the clips into a globally coherent 4D reconstruction of the entire video.", + "bbox": [ + 89, + 667, + 483, + 804 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Video Depth Estimation", + "text_level": 1, + "bbox": [ + 89, + 816, + 312, + 834 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Testing data. Our hypothesis is that, despite being trained on synthetic data, our model can generalize well to out-of-distribution synthetic and real data, as it is based on a pre-trained video diffusion model. To test this hypothe", + "bbox": [ + 89, + 839, + 483, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "sis, we evaluate our model on three benchmarks: Sintel [5] is a synthetic dataset that provides accurate depth annotations, covering diverse scenes with complex camera motion. KITTI [17] is a large driving dataset collected using stereo cameras and LiDAR sensors. Bonn [55] focuses on dynamic indoor scenes. To ensure fair comparisons, we follow the evaluation protocol used by MonST3R [113], where depth sequences are uniformly sampled from the datasets, extracting 50-110 frames per sequence for evaluation.", + "bbox": [ + 511, + 325, + 906, + 464 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. 
Following the standard affine-invariant depth evaluation protocol [65], we align the predicted video depth with the ground-truth depth before computing metrics. However, unlike single-image depth estimation [28, 105, 106], where depth alignment is performed per frame, we enforce global scale consistency by applying a single scale and shift across the entire video sequence. For quantitative evaluation, we adopt two widely used depth metrics: absolute relative error (Abs Rel) and the percentage of inlier points (with a threshold value of $\\delta < 1.25$ ).", + "bbox": [ + 511, + 472, + 908, + 625 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We compare Geo4D to state-of-the-art single-frame depth estimation methods (Marigold [28] and Depth-Anything-V2 [106]), video depth prediction (NVDS [95], ChronoDepth [70], and DepthCrafter [22]), and joint video depth and camera pose prediction (Robust-CVD [32], CasualSAM [116], and MonST3R [113]).", + "bbox": [ + 511, + 633, + 908, + 726 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results. As shown in Table 1, all versions of Geo4D outperform state-of-the-art methods by a large margin. This includes DepthCrafter [22] and MonST3R [113], the most recent video depth diffusion model and the dynamic extension of DUSt3R to dynamic scenes, respectively. Notably, while both Geo4D and DepthCrafter are based on the same video diffusion model (DynamiCrafter), our model outperforms DepthCrafter in Abs Rel by $24.0\\%$ on Sintel and $17.3\\%$ on KITTI, despite solving a more general problem. Qualitatively, Fig. 
3 shows that Geo4D achieves more consistent results, especially for fast-moving objects.", + "bbox": [ + 511, + 734, + 910, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/31fe055d0b9532f244e4df7357114bae28839107d39783d32cc77d5fe458cccd.jpg", + "image_caption": [ + "Figure 3. Qualitative results comparing Geo4D with MonST3R [113]. Attributed to our group-wise inference manner and prior geometry knowledge from pretrained video diffusion, our model successfully produces consistent 4D geometry under fast motion (first row) and deceptive reflection in the water (second row)." + ], + "image_footnote": [], + "bbox": [ + 91, + 95, + 200, + 448 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/da603b61e692742b8f3125a2a34baeb6c0ea078414c151332d08fac574719ceb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 95, + 331, + 448 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6b6c4f3a7661332f8da800776b38b53b84bf480635decdc6034a431285270ef8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 94, + 553, + 448 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f70a66213219ec9125bce35fe499bbcd826887e216833dca0c34aec4136a16f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 558, + 97, + 669, + 448 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/73b2bb4a75fa350981437555d26a2196695e4f249a9c9c3f725920b7e5eed31f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 95, + 903, + 448 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Camera Pose Estimation", + "text_level": 1, + "bbox": [ + 89, + 507, + 316, + 522 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Setup. 
We evaluate the performance of Geo4D on both the synthetic Sintel [5] dataset and the realistic TUM-dynamics [78] dataset. We follow the same evaluation protocol as in MonST3R [113]. Specifically, on Sintel, we select 14 dynamic sequences, and for TUM-dynamics, we sample the first 90 frames of each sequence with a temporal stride of 3. After aligning the predicted camera trajectory with the ground truth using the Umeyama algorithm, we calculate three commonly used metrics: Absolute Translation Error (ATE), Relative Translation Error (RPE-T), and Relative Rotation Error (RPE-R). We compare our method with other state-of-the-art discriminative methods, which jointly predict camera pose and depth, including Robust-CVD [32], CasualSAM [116], and MonST3R [113].", + "bbox": [ + 88, + 530, + 482, + 742 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results. To the best of our knowledge, Geo4D is the first method that uses a generative model to estimate camera parameters in a dynamic scene. As shown in Tab. 2, compared to existing non-generative alternatives, we achieve much better camera rotation prediction (RPE-R) and comparable camera translation prediction (ATE and RPE-T).", + "bbox": [ + 89, + 744, + 482, + 835 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Qualitative Comparison", + "text_level": 1, + "bbox": [ + 89, + 847, + 312, + 864 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4D reconstruction. We compare Geo4D with the state-of-the-art MonST3R method on the DAVIS [23] dataset. Up-", + "bbox": [ + 89, + 869, + 482, + 901 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/7038d9900452bdd46bac1ace91e6854e016dc9bd721e4b5891da5d7169423852.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodSintelTUM-dynamics
ATE ↓RPE-T ↓RPE-R ↓ATE ↓RPE-T ↓RPE-R ↓
Robust-CVD [32]0.3600.1543.4430.1530.0263.528
CasualSAM [116]0.1410.0350.6150.0710.0101.712
MonST3R [113]0.1080.0420.7320.0630.0091.217
Geo4D (Ours)0.1850.0630.5470.0730.0200.635
", + "bbox": [ + 513, + 506, + 903, + 594 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Quantitative evaluation for camera pose estimation. We achieve comparable camera pose estimation performance with other discriminative SOTA methods.", + "bbox": [ + 511, + 595, + 906, + 636 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "grading from pairwise alignment as in MonST3R to our group-wise alignment improves temporal consistency, leading to a more stable and globally coherent 4D reconstruction of point maps and camera trajectory, particularly in highly dynamic scenes. As shown in the top row of Fig. 3, Geo4D successfully tracks the racing car in 4D, whereas MonST3R struggles due to the rapid motion between pairs of images. Furthermore, likely due to the strong prior captured by the pre-trained video generative model, Geo4D correctly reconstructs the reflection of the flamingo in the water (second row in Fig. 3), whereas MonST3R misinterprets the reflection as a foreground object, resulting in incorrect depth.", + "bbox": [ + 511, + 652, + 906, + 834 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Video depth prediction. We compare Geo4D with state-of-the-art video depth predictors MonST3R [113] and DepthCrafter [22] on the Sintel [5] dataset. Qualitatively, Geo4D produces more detailed geometry, for instance for", + "bbox": [ + 511, + 839, + 908, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/161ac3a2e22555c8828abbf3af34a702e6e0dbcb916f70948abb49701c679c34.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 93, + 88, + 906, + 215 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/59fd5169451397ccf3bd7880e7258023dc7f0fc38ae0a0fd7f3567726904a1cf.jpg", + "image_caption": [ + "Figure 4. 
Qualitative video depth results comparing Geo4D with MonST3R [113] and DepthCrafter [22]. Owing to our proposed multimodal training and alignment, as well as the prior knowledge from diffusion, our method can infer a more detailed structure (first row) and a more accurate spatial arrangement from video (second row).", + "Table 3. Ablation study for the different modalities of the geometric representation on the Sintel [5] dataset. We demonstrate the effectiveness of our key design choices that both leverage multi-modality as additional training supervision signal and postprocess through our proposed multi-modal alignment algorithm will improve the overall performance." + ], + "image_footnote": [], + "bbox": [ + 91, + 258, + 903, + 366 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/5364a2a6461c500f21711140d4a24a474dee3729cfe59351e73e5d692522a123.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Strides / frameVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
150.920.21372.40.2100.0920.574
81.240.21272.80.2220.0740.524
41.890.20573.50.1850.0630.547
23.260.20472.90.1810.0580.518
", + "bbox": [ + 91, + 417, + 480, + 497 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Ablation study for the temporal sliding window stride on the Sintel [5] dataset. There is a trade-off between performance and inference speed.", + "bbox": [ + 89, + 500, + 482, + 541 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the rope on the stick in the first row of Fig. 4, and a better spatial arrangement between different dynamic objects, as shown in the second row of Fig. 4.", + "bbox": [ + 89, + 550, + 483, + 597 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Ablation Study", + "text_level": 1, + "bbox": [ + 89, + 606, + 243, + 622 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We ablate our key design choices and the effect of different modalities on the Sintel dataset.", + "bbox": [ + 89, + 628, + 482, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We study the effect of multi-modality in Tab. 3. The three modalities—point map, disparity map, and ray map—can be used either at training or inference time, or both. The first two rows show that the diffusion model trained with point maps as a single modality performs worse in both video depth and camera pose estimation than the diffusion model trained with all three modalities. Therefore, the other two modalities, even if they can be seen as redundant, serve as additional supervisory signals during training, which improves the generalization ability of the diffusion model.", + "bbox": [ + 88, + 657, + 482, + 809 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We then investigate the effectiveness of our multi-modal alignment algorithm. Compared with the second to the fourth row in Tab. 
3, which leverage only a single modality during inference, multi-modal alignment optimization (last row) achieves the best performance, showing the benefits of fusing the multiple modalities at inference time.", + "bbox": [ + 89, + 810, + 483, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We ablate the sliding window stride in Tab. 4. Results improve with a shorter stride, in part because this means that more windows and estimates are averaged, reducing the variance of the predictions by the denoising diffusion model, which is stochastic. We choose stride $s = 4$ for our main results to balance runtime and performance. Note that MonST3R [113] requires 2.41 seconds to process one frame under the same setting, so our method is 1.27 times faster than MonST3R [113].", + "bbox": [ + 511, + 419, + 906, + 555 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Discussion and Conclusion", + "text_level": 1, + "bbox": [ + 511, + 569, + 761, + 585 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We have introduced Geo4D, a novel approach that adapts a video generator for dynamic 4D reconstruction. By building on a pre-trained video generator, Geo4D achieves excellent generalization to real data despite being trained only on synthetic 4D data. We have also demonstrated the benefits of predicting multiple modalities and fusing them at test time via optimization. Our model outperforms state-of-the-art methods on video depth and camera rotation prediction, particularly in challenging dynamic scenes.", + "bbox": [ + 511, + 594, + 905, + 731 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Despite these successes, our approach has limitations. 
One is that the point map encoder-decoder is still not entirely accurate, which in turn is a bottleneck for the overall reconstruction quality.", + "bbox": [ + 511, + 731, + 903, + 792 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our approach also opens a path to integrating 4D geometry into video foundation models, e.g., to generate 3D animations from text, or to provide a more actionable signal when the video model is used as a proxy for a world model.", + "bbox": [ + 511, + 791, + 903, + 852 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments. The authors of this work were supported by Clarendon Scholarship, ERC 101001212-UNION, and EPSRC EP/Z001811/1 SYN3D.", + "bbox": [ + 511, + 854, + 903, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 89, + 187, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Anas Awadalla, Irena Gao, Josh Gardner, Jack Hessel, Yusuf Hanafy, Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Shiori Sagawa, Jenia Jitsev, Simon Kornblith, Pang Wei Koh, Gabriel Ilharco, Mitchell Wortman, and Ludwig Schmidt. Openflamingo: An opensource framework for training large autoregressive vision-language models, 2023. 5", + "[2] Michael J. Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. Bedlam: A synthetic dataset of bodies exhibiting detailed lifelike animated motion, 2023. 6, 15", + "[3] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 
3", + "[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 22563-22575, 2023. 3", + "[5] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. Black. A naturalistic open source movie for optical flow evaluation. In European Conference on Computer Vision (ECCV), 2012. 6, 7, 8, 15, 16", + "[6] Yohann Cabon, Naila Murray, and Martin Humenberger. Virtual kitti 2, 2020. 6, 15", + "[7] David Charatan, Sizhe Lester Li, Andrea Tagliasacchi, and Vincent Sitzmann. pixelsplat: 3d gaussian splats from image pairs for scalable generalizable 3d reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19457-19467, 2024. 2", + "[8] Xingyu Chen, Yue Chen, Yuliang Xiu, Andreas Geiger, and Anpei Chen. Easi3r: Estimating disentangled motion from dust3r without training. arXiv preprint arXiv:2503.24391, 2025. 3", + "[9] Yuedong Chen, Haofei Xu, Chuanxia Zheng, Bohan Zhuang, Marc Pollefeys, Andreas Geiger, Tat-Jen Cham, and Jianfei Cai. MVSplat: efficient 3d gaussian splattering from sparse multi-view images. arXiv, 2403.14627, 2024. 2", + "[10] Yuedong Chen, Chuanxia Zheng, Haofei Xu, Bohan Zhuang, Andrea Vedaldi, Tat-Jen Cham, and Jianfei Cai. Mvsplat360: Feed-forward 360 scene synthesis from sparse views. In Neural Information Processing Systems (NeurIPS), 2024. 3", + "[11] Christopher B Choy, Danfei Xu, JunYoung Gwak, Kevin Chen, and Silvio Savarese. 3d-r2n2: A unified approach for single and multi-view 3d object reconstruction. In European conference on computer vision (ECCV), pages 628-644. Springer, 2016. 2", + "[12] Wen-Hsuan Chu, Lei Ke, and Katerina Fragkiadaki. Dreamscene4d: Dynamic multi-object scene generation from monocular videos. 
Advances in Neural Information Processing Systems (NeurIPS), 2024. 2" + ], + "bbox": [ + 99, + 114, + 483, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Yiquan Duan, Xianda Guo, and Zheng Zhu. Diffusion-depth: Diffusion denoising approach for monocular depth estimation. In European Conference on Computer Vision (ECCV), pages 432-449. Springer, 2024. 3", + "[14] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. In European Conference on Computer Vision (ECCV), pages 241-258. Springer, 2024. 3, 4", + "[15] Ruiqi Gao, Aleksander Holynski, Philipp Henzler, Arthur Brussee, Ricardo Martin-Brualla, Pratul Srinivasan, Jonathan T Barron, and Ben Poole. Cat3d: Create anything in 3d with multi-view diffusion models. Advances in Neural Information Processing Systems (NeurIPS), 2024. 3", + "[16] Songwei Ge, Seungjun Nah, Guilin Liu, Tyler Poon, Andrew Tao, Bryan Catanzaro, David Jacobs, Jia-Bin Huang, Ming-Yu Liu, and Yogesh Balaji. Preserve your own correlation: A noise prior for video diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 22930-22941, 2023. 3", + "[17] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The KITTI dataset. International Journal of Robotics Research (IJRR), 2013. 6", + "[18] Georgia Gkioxari, Jitendra Malik, and Justin Johnson. Mesh r-cnn. In Proceedings of the IEEE/CVF international conference on computer vision (CVPR), pages 9785-9795, 2019. 2", + "[19] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. In International Conference on Learning Representations (ICLR), 2024. 
3", + "[20] Junlin Han, Filippos Kokkinos, and Philip Torr. Vfusion3d: Learning scalable 3d generative models from video diffusion models. In European Conference on Computer Vision (ECCV), pages 333-350. Springer, 2024. 3", + "[21] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. Neural Information Processing Systems (NeurIPS), 35:8633-8646, 2022. 3", + "[22] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 2, 6, 7, 8, 15", + "[23] Jia-Bin Huang, Sing Bing Kang, Narendra Ahuja, and Johannes Kopf. Temporally coherent completion of dynamic video. In ACM, 2016. 2, 7", + "[24] Matthias Innmann, Michael Zollhöfer, Matthias Nießner, Christian Theobalt, and Marc Stamminger. Volumedeform: Real-time volumetric non-rigid reconstruction. In European conference on computer vision (ECCV), pages 362-379. Springer, 2016. 2", + "[25] Tomas Jakab, Ruining Li, Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Farm3d: Learning articulated 3d animals by distilling 2d diffusion. In 2024 International" + ], + "bbox": [ + 522, + 92, + 903, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conference on 3D Vision (3DV), pages 852-861. IEEE, 2024. 3", + "[26] Yanqin Jiang, Chaohui Yu, Chenjie Cao, Fan Wang, Weiming Hu, and Jin Gao. *Animate3d: Animating any 3d model with multi-view video diffusion*. Advances in Neural Information Processing Systems (NeurIPS), 2024. 3", + "[27] Zeren Jiang, Chen Guo, Manuel Kaufmann, Tianjian Jiang, Julien Valentin, Otmar Hilliges, and Jie Song. Multiply: Reconstruction of multiple people from monocular video in the wild. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2", + "[28] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9492-9502, 2024. 2, 3, 4, 6", + "[29] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4):139-1, 2023. 2, 3", + "[30] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything, 2023. 3", + "[31] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 3", + "[32] Johannes Kopf, Xuejian Rong, and Jia-Bin Huang. Robust consistent video depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1611-1621, 2021. 6, 7", + "[33] Akshay Krishnan, Xinchen Yan, Vincent Casser, and Abhijit Kundu. Orchid: Image latent diffusion for joint appearance and geometry generation. arXiv preprint arXiv:2501.13087, 2025. 3, 4", + "[34] Jiahui Lei, Yijia Weng, Adam Harley, Leonidas Guibas, and Kostas Daniilidis. Mosca: Dynamic gaussian fusion from casual videos via 4d motion scaffolds. arXiv preprint arXiv:2405.17421, 2024. 2", + "[35] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. In European Conference on Computer Vision, pages 71-91. Springer, 2024. 
2", + "[36] Jiahao Li, Hao Tan, Kai Zhang, Zexiang Xu, Fujun Luan, Yinghao Xu, Yicong Hong, Kalyan Sunkavalli, Greg Shakhnarovich, and Sai Bi. Instant3D: Fast text-to-3D with sparse-view generation and large reconstruction model. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 3", + "[37] Xuanyi Li, Daquan Zhou, Chenxu Zhang, Shaodong Wei, Qibin Hou, and Ming-Ming Cheng. Sora generates videos with stunning geometrical consistency. arXiv, 2402.17403, 2024. 2" + ], + "bbox": [ + 99, + 90, + 482, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[38] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 2", + "[39] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4273-4284, 2023. 2", + "[40] Zhengqi Li, Richard Tucker, Forrester Cole, Qianqian Wang, Linyi Jin, Vickie Ye, Angjoo Kanazawa, Aleksander Holynski, and Noah Snavely. Megasam: Accurate, fast and robust structure and motion from casual dynamic videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3", + "[41] Chen-Hsuan Lin, Chen Kong, and Simon Lucey. Learning efficient point cloud generation for dense 3d object reconstruction. In proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2018. 2", + "[42] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 300–309, 2023. 3", + "[43] Youtian Lin, Zuozhuo Dai, Siyu Zhu, and Yao Yao. Gaussian-flow: 4d reconstruction with dynamic 3d gaussian particle. In CVPR, 2024. 2", + "[44] Fangfu Liu, Wenqiang Sun, Hanyang Wang, Yikai Wang, Haowen Sun, Junliang Ye, Jun Zhang, and Yueqi Duan. Reconx: Reconstruct any scene from sparse views with video diffusion model, 2024. 3", + "[45] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), pages 9298–9309, 2023. 3", + "[46] Yuan Liu, Cheng Lin, Zijiao Zeng, Xiaoxiao Long, Lingjie Liu, Taku Komura, and Wenping Wang. Syncdreamer: Generating multiview-consistent images from a single-view image. In The Twelfth International Conference on Learning Representations (ICLR), 2024.", + "[47] Xiaoxiao Long, Yuan-Chen Guo, Cheng Lin, Yuan Liu, Zhiyang Dou, Lingjie Liu, Yuexin Ma, Song-Hai Zhang, Marc Habermann, Christian Theobalt, et al. Wonder3d: Single image to 3d using cross-domain diffusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 9970-9980, 2024. 3", + "[48] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In ICLR, 2019. 6", + "[49] Yuanxun Lu, Jingyang Zhang, Tian Fang, Jean-Daniel Nahmias, Yanghai Tsin, Long Quan, Xun Cao, Yao Yao, and Shiwei Li. Matrix3d: Large photogrammetry model all-in-one. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 
3, 4" + ], + "bbox": [ + 522, + 92, + 906, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[50] Lukas Mehl, Jenny Schmalfuss, Azin Jahedi, Yaroslava Nalivayko, and Andres Bruhn. Spring: A high-resolution high-detail dataset and benchmark for scene flow, optical flow and stereo. In Proc. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 6, 15", + "[51] Luke Melas-Kyriazi, Iro Laina, Christian Rupprecht, and Andrea Vedaldi. Realfusion: 360deg reconstruction of any object from a single image. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 8446-8455, 2023. 3", + "[52] B Mildenhall, PP Srinivasan, M Tancik, JT Barron, R Ramamoorthi, and R Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision (ECCV), 2020. 2", + "[53] Richard A Newcombe, Dieter Fox, and Steven M Seitz. Dynamicfusion: Reconstruction and tracking of non-rigid scenes in real-time. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 343-352, 2015. 
2", + "[54] NVIDIA, Niket Agarwal, Arslan Ali, Maciej Bala, Yogesh Balaji, Erik Barker, Tiffany Cai, Prithvijit Chattopadhyay, Yongxin Chen, Yin Cui, Yifan Ding, Daniel Dworakowski, Jiaojiao Fan, Michele Fenzi, Francesco Ferroni, Sanja Fidler, Dieter Fox, Songwei Ge, Yunhao Ge, Jinwei Gu, Siddharth Gururani, Ethan He, Jiahui Huang, Jacob Huffman, Pooya Jannaty, Jingyi Jin, Seung Wook Kim, Gergely Klár, Grace Lam, Shiyi Lan, Laura Leal-Taixe, Anqi Li, Zhaoshuo Li, Chen-Hsuan Lin, Tsung-Yi Lin, Huan Ling, Ming-Yu Liu, Xian Liu, Alice Luo, Qianli Ma, Hanzi Mao, Kaichun Mo, Arsalan Mousavian, Seungjun Nah, Sriharsha Niverty, David Page, Despoina Paschalidou, Zeeshan Patel, Lindsey Pavao, Morteza Ramezanali, Fitsum Reda, Xiaowei Ren, Vasanth Rao Naik Sabavat, Ed Schmerling, Stella Shi, Bartosz Stefaniak, Shitao Tang, Lyne Tchapmi, Przemek Tredak, Wei-Cheng Tseng, Jibin Varghese, Hao Wang, Haoxiang Wang, Heng Wang, Ting-Chun Wang, Fangyin Wei, Xinyue Wei, Jay Zhangjie Wu, Jiashu Xu, Wei Yang, Lin Yen-Chen, Xiaohui Zeng, Yu Zeng, Jing Zhang, Qinsheng Zhang, Yuxuan Zhang, Qingqing Zhao and Artur Zolkowski. Cosmos world foundation model platform for physical ai. arXiv, 2501.03575, 2025. 2", + "[55] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguère, and C. Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 7855-7862, 2019. 6", + "[56] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2", + "[57] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 2" + ], + "bbox": [ + 99, + 90, + 482, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[58] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: a higher-dimensional representation for topologically varying neural radiance fields. ACM Transactions on Graphics (TOG), 40 (6):1-12, 2021. 2", + "[59] Jack Parker-Holder, Philip Ball, Jake Bruce, Vibhavari Dasagi, Kristian Holsheimer, Christos Kaplanis, Alexandre Moufarek, Guy Scully, Jeremy Shar, Jimmy Shi, Stephen Spencer, Jessica Yung, Michael Dennis, Sultan Kenjoyev, Shangbang Long, Vlad Mnih, Harris Chan, Maxime Gazeau, Bonnie Li, Fabio Pardo, Luyu Wang, Lei Zhang, Frederic Besse, Tim Harley, Anna Mitenkova, Jane Wang, Jeff Clune, Demis Hassabis, Raia Hadsell, Adrian Bolton, Satinder Singh, and Tim Rocktäschel. Genie 2: A large-scale foundation world model, 2024. 2", + "[60] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European conference on computer vision (ECCV), pages 523-540. Springer, 2020. 2", + "[61] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 3", + "[62] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10318-10327, 2021. 2", + "[63] Lingteng Qiu, Guanying Chen, Xiaodong Gu, Qi Zuo, Mutian Xu, Yushuang Wu, Weihao Yuan, Zilong Dong, Liefeng Bo, and Xiaoguang Han. Richdreamer: A generalizable normal-depth diffusion model for detail richness in text-to-3d. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 9914–9925, 2024. 3", + "[64] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning (ICML), pages 8748-8763. PmLR, 2021. 5", + "[65] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 44:1623-1637, 2019. 6", + "[66] Jiawei Ren, Kevin Xie, Ashkan Mirzaei, Hanxue Liang, Xiaohui Zeng, Karsten Kreis, Ziwei Liu, Antonio Torralba, Sanja Fidler, Seung Wook Kim, and Huan Ling. L4gm: Large 4d gaussian reconstruction model. Advances in Neural Information Processing Systems (NeurIPS), 2024. 3", + "[67] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 10684-10695, 2022. 4" + ], + "bbox": [ + 522, + 92, + 903, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[68] Kyle Sargent, Zizhang Li, Tanmay Shah, Charles Herrmann, Hong-Xing Yu, Yunzhi Zhang, Eric Ryan Chan, Dmitry Lagun, Li Fei-Fei, Deqing Sun, et al. Zeronvs: Zero-shot 360-degree view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9420–9429, 2024. 3", + "[69] Saurabh Saxena, Charles Herrmann, Junhwa Hur, Abhishek Kar, Mohammad Norouzi, Deqing Sun, and David J Fleet. 
The surprising effectiveness of diffusion models for optical flow and monocular depth estimation. Advances in Neural Information Processing Systems (NeurIPS), 36:39443-39469, 2023. 3", + "[70] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. arXiv, 2406.01493, 2024. 6", + "[71] Yichun Shi, Peng Wang, Jianglong Ye, Long Mai, Kejie Li, and Xiao Yang. MVDream: Multi-view diffusion for 3d generation. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 3", + "[72] Yawar Siddiqui, Antonio Alliegro, Alexey Artemov, Tatiana Tommasi, Daniele Sirigatti, Vladislav Rosov, Angela Dai, and Matthias Nießner. Meshgpt: Generating triangle meshes with decoder-only transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19615-19625, 2024. 2", + "[73] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, Devi Parikh, Sonal Gupta, and Yaniv Taigman. Make-a-video: Text-to-video generation without text-video data. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 3", + "[74] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhofer. Deepvoxels: Learning persistent 3d feature embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2437-2446, 2019. 2", + "[75] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems (NeurIPS), 34:19313-19325, 2021. 2, 4", + "[76] Brandon Smart, Chuanxia Zheng, Iro Laina, and Victor Adrian Prisacariu. Splatt3r: Zero-shot gaussian splatting from uncalibrated image pairs. 
arXiv preprint arXiv:2408.13912, 2024. 2", + "[77] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models, 2022. 6", + "[78] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of rgb-d slam systems. 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 573-580, 2012. 7", + "[79] Stanislaw Szymanowicz, Christian Rupprecht, and Andrea Vedaldi. Splatter Image: Ultra-fast single-view 3D recon" + ], + "bbox": [ + 99, + 90, + 485, + 900 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "struction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2", + "[80] Stanislaw Szymanowicz, Eldar Insafutdinov, Chuanxia Zheng, Dylan Campbell, João F. Henriques, Christian Rupprecht, and Andrea Vedaldi. Flash3D: Feed-forward generalisable 3D scene reconstruction from a single image. In Proceedings of the International Conference on 3D Vision (3DV), 2025. 2", + "[81] Stanislaw Szymanowicz, Jason Y Zhang, Pratul Srinivasan, Ruiqi Gao, Arthur Brussee, Aleksander Holynski, Ricardo Martin-Brualla, Jonathan T Barron, and Philipp Henzler. Bolt3d: Generating 3d scenes in seconds. arXiv preprint arXiv:2503.14445, 2025. 3", + "[82] Aether Team, Haoyi Zhu, Yifan Wang, Jianjun Zhou, Wenzheng Chang, Yang Zhou, Zizun Li, Junyi Chen, Chunhua Shen, Jiangmiao Pang, and Tong He. Aether: Geometric-aware unified world modeling. arXiv preprint arXiv:2503.18945, 2025. 3", + "[83] Shubham Tulsiani, Tinghui Zhou, Alexei A Efros, and Jitendra Malik. Multi-view supervision for single-view reconstruction via differentiable ray consistency. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 2626-2634, 2017. 2", + "[84] S. Umeyama. Least-squares estimation of transformation parameters between two point patterns. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 13(4):376-380, 1991. 15", + "[85] Vikram Voleti, Chun-Han Yao, Mark Boss, Adam Letts, David Pankratz, Dmitry Tochilkin, Christian Laforte, Robin Rombach, and Varun Jampani. Sv3d: Novel multi-view synthesis and 3d generation from a single image using latent video diffusion. In European Conference on Computer Vision (ECCV), pages 439-457. Springer, 2024. 3", + "[86] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. In International Conference on 3D Vision (3DV), 2024. 2", + "[87] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 12619-12629, 2023. 3", + "[88] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 3", + "[89] Jianyuan Wang, Minghao Chen, Nikita Karaev, Andrea Vedaldi, Christian Rupprecht, and David Novotny. VGGT: Visual geometry grounded network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 1, 2", + "[90] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In Proceedings of the European conference on computer vision (ECCV), pages 52-67, 2018. 2", + "[91] Qianqian Wang, Vickie Ye, Hang Gao, Weijia Zeng, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of" + ], + "bbox": [ + 522, + 92, + 906, + 900 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "motion: 4d reconstruction from a single video. In arXiv preprint arXiv:2407.13764, 2024. 
2", + "[92] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. Dust3r: Geometric 3d vision made easy. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20697-20709, 2024. 1, 2, 3, 4", + "[93] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian A. Scherer. Tartanair: A dataset to push the limits of visual slam. 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 4909-4916, 2020. 6, 15", + "[94] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems (NeurIPS), 36:7594-7611, 2023. 3", + "[95] Yiran Wang, Min Shi, Jiaqi Li, Zihao Huang, Zhiguo Cao, Jianming Zhang, Ke Xian, and Guosheng Lin. Neural video depth stabilizer. In ICCV, 2023. 6", + "[96] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. Advances in Neural Information Processing Systems (NeurIPS), 36, 2024. 3", + "[97] Daniel Watson, William Chan, Ricardo Martin Brulla, Jonathan Ho, Andrea Tagliasacchi, and Mohammad Norouzi. Novel view synthesis with diffusion models. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 4", + "[98] Philippe Weinzaepfel, Vincent Leroy, Thomas Lucas, Romain BRÉGIER, Yohann Cabon, Vaibhav ARORA, Leonid Antsfeld, Boris Chidlovskii, Gabriela Csurka, and Jerome Revaud. CroCo: self-supervised pre-training for 3D vision tasks by cross-view completion. In Proc. NeurIPS, 2022. 2", + "[99] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4d gaussian splatting for real-time dynamic scene rendering. 
In CVPR, 2024. 2", + "[100] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 1-10, 2020. 4", + "[101] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 3", + "[102] Jinbo Xing, Menghan Xia, Yong Zhang, Haoxin Chen, Wangbo Yu, Hanyuan Liu, Gongye Liu, Xintao Wang, Ying Shan, and Tien-Tsin Wong. Dynamiccafter: Animating open-domain images with video diffusion priors. In European Conference on Computer Vision (ECCV), pages 399-417. Springer, 2024. 1, 3, 4, 6" + ], + "bbox": [ + 93, + 92, + 482, + 898 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[103] Tian-Xing Xu, Xiangjun Gao, Wenbo Hu, Xiaoyu Li, Song-Hai Zhang, and Ying Shan. Geometrycrafter: Consistent geometry estimation for open-world videos with diffusion priors, 2025. 3", + "[104] Jianing Yang, Alexander Sax, Kevin J. Liang, Mikael Henaff, Hao Tang, Ang Cao, Joyce Chai, Franziska Meier, and Matt Feiszli. Fast3r: Towards 3d reconstruction of $1000+$ images in one forward pass. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 2", + "[105] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Ji-ashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proc. CVPR, 2024. 6", + "[106] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. Advances in Neural Information Processing Systems (NeurIPS), 2024. 6", + "[107] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. 
Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20331-20341, 2024. 2", + "[108] David Yifan Yao, Albert J. Zhai, and Shenlong Wang. Uni4d: Unifying visual foundation models for 4d modeling from a single video, 2025. 3", + "[109] Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048, 2024.3", + "[110] Xumin Yu, Yongming Rao, Ziyi Wang, Zuyan Liu, Jiwen Lu, and Jie Zhou. Pointr: Diverse point cloud completion with geometry-aware transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12498-12507, 2021. 2", + "[111] Yuheng Yuan, Qiuhong Shen, Xingyi Yang, and Xinchao Wang. 1000+ fps 4d gaussian splatting for dynamic scene rendering, 2025. 2", + "[112] David Junhao Zhang, Jay Zhangjie Wu, Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou. Show-1: Marrying pixel and latent diffusion models for text-to-video generation. International Journal of Computer Vision (IJCV), pages 1-15, 2024. 3", + "[113] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. In International Conference on Learning Representations (ICLR), 2025. 1, 2, 3, 5, 6, 7, 8", + "[114] Jason Y Zhang, Amy Lin, Moneish Kumar, Tzu-Hsuan Yang, Deva Ramanan, and Shubham Tulsiani. Cameras as rays: Pose estimation via ray diffusion. In International Conference on Learning Representations (ICLR), 2024. 5", + "[115] Qihang Zhang, Shuangfei Zhai, Miguel Angel Bautista, Kevin Miao, Alexander Toshev, Joshua Susskind, and Jiatao Gu. 
World-consistent video diffusion with explicit 3d" + ], + "bbox": [ + 516, + 92, + 906, + 898 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 506, + 935 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3", + "[116] Zhoutong Zhang, Forrester Cole, Zhengqi Li, Michael Rubinstein, Noah Snavely, and William T. Freeman. Structure and motion from casual videos. In European Conference on Computer Vision (ECCV), 2022. 6, 7", + "[117] Wenliang Zhao, Yongming Rao, Zuyan Liu, Benlin Liu, Jie Zhou, and Jiwen Lu. Unleashing text-to-image diffusion models for visual perception. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5729-5739, 2023. 3", + "[118] Chuanxia Zheng and Andrea Vedaldi. Free3d: Consistent novel view synthesis without 3d representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9720-9731, 2024. 3, 4", + "[119] Yang Zheng, Adam W Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J Guibas. Pointodyssey: A large-scale synthetic dataset for long-term point tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 19855-19865, 2023. 6, 15" + ], + "bbox": [ + 91, + 92, + 482, + 375 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Geo4D: Leveraging Video Generators for Geometric 4D Scene Reconstruction Supplementary Material", + "text_level": 1, + "bbox": [ + 106, + 85, + 890, + 138 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this supplementary material, we provide additional information to supplement our main submission. 
The code is available here for research purposes: github.com/ jzr99/Geo4D", + "bbox": [ + 89, + 156, + 483, + 218 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "6. Implementation Details", + "text_level": 1, + "bbox": [ + 89, + 251, + 312, + 268 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "6.1. Training Dataset", + "text_level": 1, + "bbox": [ + 89, + 282, + 256, + 299 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "As shown in Tab. 5, we use five synthetic datasets for training: Spring [50], BEDLAM [2], PointOdyssey [119], TarTanAir [93], and VirtualKitti [6]. Although all datasets are synthetic, we found that some depth pixels are missing in PointOdyssey [119]. To address this, we apply max pooling to inpaint the missing pixels. During training, we sample each dataset according to the ratios in Tab. 5. For each sample, we select 16 frames from the sequence, with the sampling stride randomly chosen from $\\{1,2,3\\}$ to allow our diffusion model to adapt to input videos with various frame rates.", + "bbox": [ + 89, + 311, + 483, + 477 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "6.2. Optimization Details", + "text_level": 1, + "bbox": [ + 89, + 507, + 285, + 523 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The overall optimization process is outlined in Algorithm 1. We first predict all three modality maps using our diffusion model for each video clip $g$ . The predicted point maps are then roughly aligned based on the overlapping frames using the Umayama algorithm [84]. The camera intrinsic $\\mathbf{K}^k$ is initialized by minimizing the projection error of the point map $X^{k,g^k}$ in its reference (first) frame $k$ within each window group $g^k$ . The camera extrinsics are then initialized using the RANSAC PnP algorithm. In the first stage of optimization, the point maps are roughly disentangled into camera pose and depth map. 
The disparity map is then aligned with the global depth inferred from point maps by solving Eq. (5) from the main paper to obtain the scale and shift parameters. The camera parameters extracted from the predicted ray map are aligned with the global camera trajectory based on the reference (first) frame of each video clip $g$ via Eq. (8) from the main paper. After initializing all the alignment learnable parameters, including rotation $\\mathbf{R}_{*}^{g}$ , scale $\\lambda_{*}^{g}$ , and shift $\\beta_{*}^{g}$ across different modalities, where $* \\in \\{\\mathrm{p},\\mathrm{d},\\mathrm{c}\\}$ , we jointly optimize all the learnable parameters by Eq. (10). Specifically, we set the weights for each loss term in Eq. (10) as $\\alpha_{1} = 1, \\alpha_{2} = 2, \\alpha_{3} = 0.005, \\alpha_{4} = 0.015$ to roughly equalize the scale of the different losses.", + "bbox": [ + 89, + 536, + 483, + 900 + ], + "page_idx": 14 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "Algorithm 1 Multi-Modal Alignment Optimization \n1: $X^{i,g}, D^{i,g}, r^{i,g} \\gets$ Predicted by our diffusion model \n2: $D_{\\mathrm{p}}^{i}, \\lambda_{\\mathrm{p}}^{g}, R_{\\mathrm{p}}^{g}, \\beta_{\\mathrm{p}}^{g} \\gets$ Initialized by Umayama algorithm \n3: $K_{\\mathrm{p}}^{k} \\gets$ Optimized from $X^{k,g^k}$ \n4: $R_{\\mathrm{p}}^{i}, o_{\\mathrm{p}}^{i} \\gets$ Initialized by Ransac PnP from pointmaps $X^i$ \n5: $R_{\\mathrm{c}}^{i,g}, o_{\\mathrm{c}}^{i,g} \\gets$ Initialized by Eqs. (6) and (7) from raymaps $r^{i,g}$ \n6: repeat \n7: if Iteration = Align start iteration then \n8: $\\lambda_{\\mathrm{d}}^{g}, \\beta_{\\mathrm{d}}^{g} \\gets \\arg \\min \\mathcal{L}_{\\mathrm{d}}$ (Eq. (5)) \n9: $R_{\\mathrm{c}}^{g}, \\lambda_{\\mathrm{c}}^{g}, \\beta_{\\mathrm{c}}^{g} \\gets \\arg \\min \\mathcal{L}_{\\mathrm{c}}$ (Eq. 
(8)) \n10: else if Iteration < Align start iteration then \n11: $D_{\\mathrm{p}}^{i}, K_{\\mathrm{p}}^{i}, R_{\\mathrm{p}}^{i}, o_{\\mathrm{p}}^{i}, \\lambda_{\\mathrm{p}}^{g}, R_{\\mathrm{p}}^{g}, \\beta_{\\mathrm{p}}^{g}, \\gets \\arg \\min \\mathcal{L}_{\\mathrm{p}} + \\mathcal{L}_{\\mathrm{s}}$ \n12: else \n13: $D_{\\mathrm{p}}^{i}, K_{\\mathrm{p}}^{i}, R_{\\mathrm{p}}^{i}, o_{\\mathrm{p}}^{i}, \\lambda_{*}^{g}, R_{*}^{g}, \\beta_{*}^{g} \\gets \\arg \\min \\mathcal{L}_{\\mathrm{all}}$ \n14: end if \n15: until max loop reached", + "bbox": [ + 514, + 155, + 903, + 406 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/865e847eb30e6f45bfa98558af28b890a5b797f29d27a850c9a4f209048b3886.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetScene type#Frames#SequencesRatio
PointOdyssey [119]Indoors/Outdoors200K13116.7%
TartanAir [93]Indoors/Outdoors1000K16316.7%
Spring [50]Outdoors6K3716.7%
VirtualKITTI [6]Driving43K32016.7%
BEDLAM [2]Indoors/Outdoors380K10K33.3%
", + "bbox": [ + 514, + 430, + 903, + 513 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/3a46001fcd7f623e3cf8dd49722f9a3f098ee6bcf7a68c89fbf981cd929c42a0.jpg", + "table_caption": [ + "Table 5. Details of training datasets. Our method only uses synthetic datasets for training." + ], + "table_footnote": [], + "table_body": "
StepsVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
10.22170.70.2340.0720.753
50.20573.50.1850.0630.547
100.20773.20.2120.0710.508
250.22072.20.2110.0740.564
", + "bbox": [ + 514, + 565, + 903, + 662 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 6. Ablation study for the DDIM sampling steps. on the Sintel [5] dataset.", + "bbox": [ + 511, + 662, + 903, + 691 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "7. Additional Analysis", + "text_level": 1, + "bbox": [ + 513, + 724, + 702, + 741 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "7.1. Ablating the Number of Denoising Steps", + "text_level": 1, + "bbox": [ + 511, + 753, + 857, + 770 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We study the influence of the number of denoising steps during inference. As shown in Tab. 6, the model achieves optimal performance after around 5 steps. Compared to the video generation task, where a larger number of denoising steps usually produces a more detailed generated video, 4D reconstruction is a more deterministic task, which requires fewer steps. Similar phenomena are also observed in [22], which uses a video generator for video depth estimation.", + "bbox": [ + 511, + 779, + 906, + 902 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 924, + 506, + 935 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/235dd588867b0f447d916c89bdee70030354dd2db6dddad26a1dda0b7431f433.jpg", + "image_caption": [ + "Figure 5. Additional qualitative results. Our method generalizes well to various scenes with different 4D objects and performs robustly against different camera and object motions." + ], + "image_footnote": [], + "bbox": [ + 89, + 85, + 911, + 691 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/a3bd301507763e8287ae60b8ae2ce0a502a3f782186fa27e1fb10da1c54d0b3a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
w/o fine-tuned0.21272.10.1920.0610.577
w fine-tuned0.20573.50.1850.0630.547
", + "bbox": [ + 91, + 758, + 480, + 823 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 7. Ablation study for the fine-tuned point map VAE on the Sintel [5] dataset. The fine-tuned point map VAE performs better than the original one.", + "bbox": [ + 89, + 824, + 482, + 866 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "7.2. Ablation Study for Fine-Tuned Point Map VAE", + "text_level": 1, + "bbox": [ + 513, + 761, + 906, + 779 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As stated in the main paper, we added an additional branch to predict the uncertainty for our point map VAE and fine-tuned it based on Eq. 3. We perform an ablation study on our fine-tuning strategy. As shown in Tab. 7, our fine-tuned point map VAE achieves consistently better performance on both video depth estimation and camera pose estimation tasks compared with the original pre-trained image VAE,", + "bbox": [ + 511, + 794, + 906, + 902 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/8c9b162c93eb332a31fde267e9a0e93ffc3f40492cd2dbd264eb848306b45339.jpg", + "image_caption": [ + "Figure 6. Visualization of different geometric modality maps." + ], + "image_footnote": [], + "bbox": [ + 93, + 88, + 482, + 205 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "demonstrating the necessity and effectiveness of our finetuning strategy.", + "bbox": [ + 89, + 246, + 482, + 275 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "7.3. Analysis of Multi-Modal Representation", + "text_level": 1, + "bbox": [ + 89, + 286, + 434, + 301 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Point maps (PMs) and disparity maps (DMs) are complementary. DMs better represent near objects, while PMs are more depth-agnostic (e.g., human vs house in Fig. 6 (b,c)). 
As in prior work, DMs are affine invariant (which here makes them range-compatible with the pretrained RGB VAE); their scale and shift, needed to recover undistorted geometry, are inferred by matching them to the predicted PMs. Ray maps (RMs) help infer the camera pose when PMs fail to represent points at infinity (such as the sky in Fig. 6 (e)). We observed that PMs tend to be noisier than DMs, so we prioritized modeling the PMs' uncertainty. Per-pixel uncertainty for ray maps are less meaningful given the high degree of correlation between individual rays. During multi-modal alignment, we align global point clouds with DMs in disparity space and with PMs in linear space. This naturally gives more weight to near points, which tend to be estimated well by DMs, and weighs points based on uncertainty with PMs, thus taking advantage of both modalities.", + "bbox": [ + 89, + 306, + 483, + 580 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "8. Visualization", + "text_level": 1, + "bbox": [ + 89, + 592, + 225, + 607 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Figure 5 shows additional visualizations for indoor, outdoor, and driving scenes. Although our model is only trained on synthetic datasets, it generalizes to real-world data with diverse objects and motions.", + "bbox": [ + 89, + 618, + 483, + 678 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "9. Limitations", + "text_level": 1, + "bbox": [ + 89, + 691, + 212, + 707 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Although our method performs well and generalizes to a wide range of in-the-wild videos, it can struggle in cases involving significant changes in focal length or extreme camera motion throughout a sequence. This limitation likely stems from the lack of focal length variation in our training data. Incorporating more sequences with diverse camera movements and zooming effects could help mitigate this issue. 
Moreover, due to the inherent temporal attention mechanism in our network architecture, our approach currently supports only monocular video input. Extending the method to handle multi-view images or videos is a promising direction for future work.", + "bbox": [ + 89, + 717, + 482, + 898 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_model.json b/data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6ff02b8618a73b5d8a1e5e14e41c8a1569e42b02 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_model.json @@ -0,0 +1,3369 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.275, + 0.061, + 0.725 + ], + "angle": 270, + "content": "arXiv:2504.07961v2 [cs.CV] 19 Aug 2025" + }, + { + "type": "title", + "bbox": [ + 0.106, + 0.131, + 0.892, + 0.152 + ], + "angle": 0, + "content": "Geo4D: Leveraging Video Generators for Geometric 4D Scene Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.18, + 0.828, + 0.218 + ], + "angle": 0, + "content": "Zeren Jiang1 Chuanxia Zheng1 Iro Laina1 Diane Larlus2 Andrea Vedaldi1 \n1Visual Geometry Group, University of Oxford 2Naver Labs Europe" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.22, + 0.829, + 0.234 + ], + "angle": 0, + "content": "{zeren, cxzheng, iro, vedaldi}@robots.ox.ac.uk diane.larlus@naverlabs.com" + }, + { + "type": "text", + "bbox": [ + 0.429, + 0.24, + 0.566, + 0.253 + ], + "angle": 0, + "content": "geo4d.github.io" + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.291, + 0.904, + 0.495 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.497, + 0.908, + 0.54 + ], + "angle": 0, + "content": "Figure 1. 
Geo4D repurposes a video diffusion model [102] for monocular 4D reconstruction. It uses only synthetic data for training, yet generalizes well to out-of-domain real videos. It predicts several geometric modalities, including point maps, disparity maps, and ray maps, fusing and aligning them to obtain state-of-the-art dynamic reconstruction even for scenes with extreme object and camera motion." + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.552, + 0.327, + 0.567 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.585, + 0.483, + 0.795 + ], + "angle": 0, + "content": "We introduce Geo4D, a method to repurpose video diffusion models for monocular 3D reconstruction of dynamic scenes. By leveraging the strong dynamic priors captured by large-scale pre-trained video models, Geo4D can be trained using only synthetic data while generalizing well to real data in a zero-shot manner. Geo4D predicts several complementary geometric modalities, namely point, disparity, and ray maps. We propose a new multi-modal alignment algorithm to align and fuse these modalities, as well as a sliding window approach at inference time, thus enabling robust and accurate 4D reconstruction of long videos. Extensive experiments across multiple benchmarks show that Geo4D significantly surpasses state-of-the-art video depth estimation methods." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.829, + 0.222, + 0.844 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.483, + 0.901 + ], + "angle": 0, + "content": "We consider the problem of feed-forward 4D reconstruction, which involves learning a neural network to reconstruct the 3D geometry of a dynamic scene from a monoc" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.553, + 0.905, + 0.644 + ], + "angle": 0, + "content": "ular video. 
This task is particularly challenging for videos captured in uncontrolled settings, such as those shot with handheld cameras or downloaded from the Internet. However, a robust solution to this problem would have a tremendous impact on a wide range of applications, from video understanding to computer graphics and robotics." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.644, + 0.906, + 0.795 + ], + "angle": 0, + "content": "4D reconstruction from videos is related to multi-view static 3D reconstruction, which is typically addressed using methods from visual geometry like bundle adjustment. Recent neural networks [89, 92] have emerged as powerful tools that can replace, or at least complement, bundle adjustment. They excel especially in difficult reconstruction scenarios, involving, e.g., textureless surfaces and occlusions, thanks to the priors they learn from data. Given the additional challenges involved in 4D reconstruction, we expect that such priors would benefit this task even more." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.906, + 0.901 + ], + "angle": 0, + "content": "In fact, powerful networks like DUSt3R [92], designed for static multi-view 3D reconstruction, have recently been extended to the dynamic case, for example by MonST3R [113]. However, these models are heavily engineered to solve specific 3D reconstruction problems. Most importantly, they require significant amounts of training data with 3D annotations for supervision. Such data" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.152 + ], + "angle": 0, + "content": "is difficult to collect for dynamic scenes, especially in real life. This suggests using 4D synthetic training data instead. However, this data is difficult to obtain at scale, and the gap with the real world can compromise generalization." 
+ }, + { + "type": "text", + "bbox": [ + 0.089, + 0.155, + 0.483, + 0.35 + ], + "angle": 0, + "content": "One way to mitigate this problem is to pre-train the model on tasks related to 3D reconstruction for which real data is easily available. For example, DUSt3R [92] and derived methods [113] use image matching for pretraining [98]. Here, we suggest starting instead from an off-the-shelf video generator. Video generators are powerful models, often considered proxies of world simulators [37, 54, 59]. More importantly for us, the videos they generate demonstrate an understanding of effects like camera motion and perspective, as well as typical object motion in the context of a scene. However, they only generate pixels, leaving any 3D or 4D understanding implicit and thus not directly actionable." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.352, + 0.483, + 0.563 + ], + "angle": 0, + "content": "In this work, we show that a pre-trained off-the-shelf video generator can be turned into an effective monocular feed-forward 4D reconstructor. To this end, we introduce Geo4D, a novel approach for adapting Video Generators for Geometric 4D Reconstruction. With Geo4D, we demonstrate that these generic video architectures can successfully solve complex 4D reconstruction tasks, which is a step towards future video foundation models that natively integrate 4D geometry. Prior work such as Marigold [28] and concurrent work DepthCrafter [22] have looked at adapting, respectively, image and video generators for depth estimation. Here, we go one step further and consider the full recovery of 4D geometry, including camera motion and dynamic 3D structure." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.565, + 0.483, + 0.716 + ], + "angle": 0, + "content": "With Geo4D, our goal is to make 4D geometry explicit in the video generator. This in turn requires us to choose an explicit representation of 4D information. We follow DUSt3R and adopt its viewpoint-invariant point maps. 
Namely, we associate each pixel in each frame with the coordinate of the corresponding 3D point, expressed relative to the first frame in the video, used as a reference. Hence, the static parts of the point clouds extracted from the different frames line up, and the dynamic parts form a 3D 'trace' of the motion of the dynamic objects, as shown in Fig. 1." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.719, + 0.483, + 0.868 + ], + "angle": 0, + "content": "Viewpoint-invariant point maps are a powerful representation because they implicitly encode the camera motion and intrinsics and can be easily predicted by a neural network [92]. However, they are not necessarily the best representation for all parts of the scene, particularly for points far away from the observer or even at infinity, such as the sky. We thus consider two more modalities with better dynamic range, namely disparity maps and camera ray maps. Ray maps, in particular, are defined for all image pixels regardless of the scene geometry." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Our model thus predicts three modalities: point, disparity, and ray maps. These modalities are redundant in prin" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.182 + ], + "angle": 0, + "content": "ciple, but complementary in practice. At test time, we reconcile them via a fast, global optimization step and show that this leads to significantly more robust 4D reconstructions. Due to depth and ray map prediction, we show very strong empirical results on video depth estimation and in the recovery of the camera orientation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.183, + 0.905, + 0.289 + ], + "angle": 0, + "content": "One of the challenges of monocular 4D reconstruction is that it is ambiguous, significantly more so than static 3D reconstruction. 
However, the stochastic nature of the video generator can help deal with this ambiguity. We also introduce uncertainty maps in the encoder-decoder architecture that processes the geometric maps, and integrate them into the multi-modal alignment process." }, { "type": "text", "bbox": [ 0.512, 0.29, 0.905, 0.456 ], "angle": 0, "content": "Overall, our contributions are as follows. (i) We introduce Geo4D, a 4D feed-forward network for dynamic scene reconstruction that builds on top of an off-the-shelf video generator. (ii) We suggest generating multiple partially redundant geometric modalities and fusing them at test time via lightweight optimization. (iii) We show the benefits of this multi-modal fusion in terms of improved 4D prediction accuracy. Experiments show that this model can reconstruct even highly dynamic scenes (such as the drifting scene in DAVIS [23] presented in Fig. 1) and outperforms current video depth and camera rotation estimation methods." }, { "type": "title", "bbox": [ 0.513, 0.471, 0.655, 0.487 ], "angle": 0, "content": "2. Related Work" }, { "type": "title", "bbox": [ 0.513, 0.497, 0.787, 0.513 ], "angle": 0, "content": "2.1. Dynamic Scene Reconstruction" }, { "type": "text", "bbox": [ 0.512, 0.52, 0.905, 0.716 ], "angle": 0, "content": "Static 3D reconstruction. Feed-forward 3D reconstruction has achieved remarkable success across various representations, including voxels [11, 74, 83], meshes [18, 72, 90], and point clouds [41, 110]. These advancements have been further driven by implicit neural representations [52, 56, 60, 75] and the emergence of 3D Gaussian Splatting (3D-GS) [7, 9, 29, 76, 79, 80]. Recently, DUSt3R [92] introduced a point map representation for scene-level 3D reconstruction, followed by [35, 86, 89, 104]. However, these models predominantly focus on static 3D reconstruction. 
Our approach also uses point maps as a representation but extends them to handle dynamic scenes, which present additional challenges due to object motion over time." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.72, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Iterative 4D reconstruction. Iterative or optimization-based approaches reconstruct 4D models from monocular videos by iteratively fitting the observed data. Classical techniques often rely on RGB-D sensors [24, 53], but such steps are impractical for many real-world scenes. Recently, with advancements in neural representations [52, 56], NeRF-based approaches [27, 38, 39, 57, 58, 62] have shown impressive results. However, volume rendering in NeRF is computationally expensive. Convergence and rendering speed can be improved by using 3D-GS representations [12, 29, 34, 43, 91, 99, 107, 111], which reduce but do not eliminate the cost of iterative optimization. Very" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.092, + 0.905, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.286, + 0.908, + 0.342 + ], + "angle": 0, + "content": "Figure 2. Overview of Geo4D. During training, video conditions are injected by locally concatenating the latent feature of the video with diffused geometric features \\(\\mathbf{z}_t^{\\mathrm{X}},\\mathbf{z}_t^{\\mathrm{D}},\\mathbf{z}_t^{\\mathrm{r}}\\) and are injected globally via cross-attention in the denoising U-Net, after CLIP encoding and a query transformer. The U-Net is fine-tuned via Eq. 2. During inference, iteratively denoised latent features \\(\\hat{\\mathbf{z}}_0^{\\mathrm{X}},\\hat{\\mathbf{z}}_0^{\\mathrm{D}},\\hat{\\mathbf{z}}_0^{\\mathrm{r}}\\) are decoded by the fine-tuned VAE decoder, followed by multi-modal alignment optimization for coherent 4D reconstruction." 
+ }, + { + "type": "text", + "bbox": [ + 0.089, + 0.357, + 0.482, + 0.493 + ], + "angle": 0, + "content": "recently, MegaSaM [40] achieved highly accurate and robust camera pose estimation and reconstruction for dynamic videos, but it requires accurate monocular depth priors. Similarly, Uni4D [108] produces accurate 4D reconstructions by leveraging various visual foundation models and performing multi-stage bundle adjustment. In contrast, our approach is a diffusion-driven feed-forward framework, which eliminates the need for per-video bundle adjustment and external depth estimation models." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.499, + 0.484, + 0.695 + ], + "angle": 0, + "content": "Feed-forward 4D reconstruction. Similar to our approach, recent works have started to explore feed-forward 4D reconstruction for dynamic scenes: a monocular video with dynamic objects is processed by a neural network to recover a 4D representation. For objects, L4GM [66] andAnimate3D [26] first generate multi-view videos from a monocular video input, and subsequently apply 3D-GS [29] to reconstruct a temporally consistent 4D model. For scenes, a notable example is MonST3R [113], which adapts the static scene reconstruction of DUSt3R [92] to handle dynamic scenes. Very recently, Easi3R [8] applies attention adaptation during inference and performs 4D reconstruction based on DUSt3R [92] in an efficient, training-free manner." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.711, + 0.345, + 0.725 + ], + "angle": 0, + "content": "2.2. Geometric Diffusion Models" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.735, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Our method builds upon advancements in video diffusion models [3, 4, 16, 19, 21, 31, 73, 88, 94, 102, 112], which generate temporally consistent videos from text or image prompts. 
Recent studies have explored the rich 3D priors embedded within large-scale pre-trained diffusion models, employing either knowledge distillation [25, 42, 51, 61, 87, 96] or fine-tuning [20, 36, 45-47, 71, 85, 118] for 3D reconstruction and generation. While these methods have significantly advanced single-object 3D reconstruction from sparse inputs, they remain largely constrained to static, isolated objects centered within an image. Beyond single" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.357, + 0.907, + 0.523 + ], + "angle": 0, + "content": "object reconstruction, several recent efforts have extended pre-trained diffusion models to tackle scene-level 3D tasks, such as optical flow estimation [69], view synthesis [10, 15, 44, 68, 81, 109], depth estimation [13, 28, 117], and normal estimation [14, 33, 63]. More related to our approach, Matrix3D [49] jointly predicts depth and camera parameters, and WVD [115] introduces a hybrid RGB+point map representation for scene reconstruction. However, these approaches assume static 3D environments, whereas we address dynamic 4D scene reconstruction, which is a much harder problem due to object motion across time." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.525, + 0.909, + 0.752 + ], + "angle": 0, + "content": "More closely related to our approach, concurrent GeometryCrafter [103] introduced a point map VAE with a dual encoder-decoder architecture to improve reconstruction accuracy. However, their point maps are defined in individual camera coordinates, necessitating the use of additional segmentation [30] and tracking models [101] to recover the global point map and estimate camera poses. Aether [82], on the other hand, outputs depth maps and ray maps from a video diffusion model for 4D reconstruction. 
In contrast, our experiments demonstrate that performance can be significantly enhanced by jointly predicting multiple geometric modalities that capture diverse dynamic ranges, ensuring better temporal coherence and robustness. Importantly, our approach is self-contained and does not rely on external models, enhancing its generality and reliability." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.769, + 0.605, + 0.784 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.908, + 0.872 + ], + "angle": 0, + "content": "Our goal is to learn a neural network \\( f_{\\theta} \\) that can reconstruct dynamic 3D scenes from monocular videos. Given as input a monocular video \\( \\mathcal{I} = \\{I^i\\}_{i=1}^N \\) consisting of \\( N \\) frames, where each frame is an RGB image \\( I^i \\in \\mathbb{R}^{H \\times W \\times 3} \\), the network \\( f_{\\theta} \\) returns a representation of its 4D geometry:" + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.884, + 0.907, + 0.903 + ], + "angle": 0, + "content": "\\[\nf _ {\\boldsymbol {\\theta}}: \\left\\{\\boldsymbol {I} ^ {i} \\right\\} _ {i = 1} ^ {N} \\mapsto \\left\\{\\left(\\boldsymbol {D} ^ {i}, \\boldsymbol {X} ^ {i}, \\boldsymbol {r} ^ {i}\\right) \\right\\} _ {i = 1} ^ {N}. \\tag {1}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.09, + 0.482, + 0.212 + ], + "angle": 0, + "content": "The network computes the disparity map \\(D^{i}\\in \\mathbb{R}^{H\\times W\\times 1}\\) the viewpoint-invariant point map \\(X^{i}\\in \\mathbb{R}^{H\\times W\\times 3}\\), and the ray map \\(\\pmb {r}^i\\in \\mathbb{R}^{H\\times W\\times 6}\\) for each frame \\(I^i\\) \\(i = 1,\\dots ,N\\). As we discuss in Sec. 
3.2, these quantities collectively represent the 4D geometry of a scene, including its dynamic structure and time-varying camera extrinsic and intrinsic parameters. No camera parameters are provided as input; these are implicitly estimated by the model as well." }, { "type": "text", "bbox": [ 0.09, 0.212, 0.482, 0.318 ], "angle": 0, "content": "We implement \\( f_{\\theta} \\) as a video diffusion model, where \\( \\theta \\) are the learnable parameters. We discuss the relevant background on video diffusion models in Sec. 3.1. Then, in Sec. 3.2, we describe how we extend the model to predict the three modalities of the 4D geometry. Finally, in Sec. 3.3, we describe how we fuse and align these modalities to obtain a coherent 4D reconstruction at test time." }, { "type": "title", "bbox": [ 0.091, 0.327, 0.416, 0.342 ], "angle": 0, "content": "3.1. Preliminaries: Video Diffusion Model" }, { "type": "text", "bbox": [ 0.09, 0.349, 0.483, 0.576 ], "angle": 0, "content": "Our key insight is that by building on pre-trained video diffusion models, our approach can exploit the strong motion and scene geometry priors inherently encoded within these models. Specifically, we build Geo4D on top of DynamiCrafter [102], a \"foundation\" video diffusion model. DynamiCrafter is a latent diffusion model [67]: it uses a variational autoencoder (VAE) to obtain a more compact video representation and thus reduce computational complexity. During training, a target sequence \(\mathcal{X} = \pmb{x}^{1:N}\) is first encoded into the latent space using the encoder \(z_0^{1:N} = \mathcal{E}(\pmb{x}^{1:N})\), and then perturbed by \(\pmb{z}_t^{1:N} = \sqrt{\bar{\alpha}_t}\pmb{z}_0^{1:N} + \sqrt{1 - \bar{\alpha}_t}\epsilon^{1:N}\), where \(\epsilon \sim \mathcal{N}(\mathbf{0},\mathbf{I})\) is Gaussian noise, and \(\bar{\alpha}_t\) is the noise level at step \(t\) of \(T\) noising steps. 
The denoising network \(\epsilon_{\theta}\) is then trained to reverse this noising process by optimizing the following objective:" }, { "type": "equation", "bbox": [ 0.104, 0.585, 0.482, 0.624 ], "angle": 0, "content": "\[\n\min _ {\boldsymbol {\theta}} \mathbb {E} _ {(\boldsymbol {x} ^ {1: N}, y), t, \epsilon^ {1: N} \sim \mathcal {N} (\boldsymbol {0}, \boldsymbol {I})} \left\| \epsilon^ {1: N} - \epsilon_ {\boldsymbol {\theta}} \left(\boldsymbol {z} _ {t} ^ {1: N}, t, y\right) \right\| _ {2} ^ {2}, \tag {2}\n\]" }, { "type": "text", "bbox": [ 0.09, 0.624, 0.483, 0.687 ], "angle": 0, "content": "where \(y\) is the conditional input. Once trained, the model generates a video prompted by \(y\) via iteratively denoising from pure noise \(\mathbf{z}_T^{1:N}\), and then decoding the denoised latent with a decoder \(\hat{\mathcal{X}} = \mathcal{D}(\hat{\mathbf{z}}_0^{1:N})\)." }, { "type": "title", "bbox": [ 0.091, 0.695, 0.411, 0.71 ], "angle": 0, "content": "3.2. Multi-modal Geometric 4D Diffusion" }, { "type": "text", "bbox": [ 0.09, 0.717, 0.483, 0.762 ], "angle": 0, "content": "We first provide a more precise description of the 4D multimodal representation output by our model, and then explain how it is encoded in the latent space for generation." }, { "type": "text", "bbox": [ 0.09, 0.765, 0.483, 0.901 ], "angle": 0, "content": "Multi-modal geometric representations. The dynamic 3D structure of a scene is represented by a sequence of point maps \(\{\pmb{X}^i\}_{i=1}^N\), one for each of its \(N\) frames. Let \((u, v)\) denote the pixel coordinates in the image plane. Then, the value \(X_{uv}^i \in \mathbb{R}^3\) is the 3D coordinate of the scene point that lands at pixel \((u, v)\) in frame \(I^i\), expressed in the reference frame of camera \(i = 1\). 
Because the reference frame is fixed and independent of the time-varying viewpoint, we call these point maps viewpoint-invariant. The" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.197 + ], + "angle": 0, + "content": "advantages of this representation are convincingly demonstrated by DUSt3R [92]. For a static scene, or by knowing which image pixels correspond to the static part of a scene, knowledge of the point maps allows recovery of the intrinsic and extrinsic camera parameters as well as the scene depth. This is done by solving an optimization problem that aligns the dynamic point maps with a pinhole camera model." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.198, + 0.907, + 0.485 + ], + "angle": 0, + "content": "As noted in Sec. 1, while point maps \\(\\{\\pmb{X}^i\\}_{i=1}^N\\) fully encode the 4D geometry of the scene, they are not effective for all parts of the scene. Their dynamic range is limited, and they are not even defined for points at infinity (e.g. sky). Hence, we consider two additional modalities: disparity maps \\(\\{\\pmb{D}^i\\}_{i=1}^N\\) and camera ray maps \\(\\{\\pmb{r}^i\\}_{i=1}^N\\), also encouraged by prior evidence [14, 33, 49] that diffusion models can benefit from learning to predict multiple quantities. Disparity maps are not viewpoint-invariant, but have a better dynamic range than point maps (the disparity is zero for points at infinity). Ray maps represent only the camera parameters and are defined for all image pixels, independent of the scene geometry. For the disparity map, \\(D_{uv}^i\\) is the disparity (inverse depth) of the scene point that lands at pixel \\((u,v)\\), as seen in frame \\(I^i\\). 
For the ray map, we adopt Plücker coordinates [75, 97, 118], i.e., \\(\\pmb{r}_{uv} = (\\pmb{d}_{uv}, \\pmb{m}_{uv})\\), where \\(\\pmb{d}_{uv} = \\mathbf{R}^\\top \\mathbf{K}^{-1}(u,v,1)^\\top\\) is the ray direction, and \\(\\pmb{m}_{uv} = -\\mathbf{R}^\\top \\mathbf{t} \\times \\pmb{d}_{uv}\\), where \\((\\mathbf{R}, \\mathbf{K}, \\mathbf{t})\\) are the camera's rotation, calibration, and translation parameters." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.488, + 0.907, + 0.655 + ], + "angle": 0, + "content": "Multi-modal latent encoding. The three modalities come in the form of images and can thus be naturally predicted by the video diffusion architecture. However, this requires first mapping them to the latent space, for which we need suitable versions of the encoder \\(\\mathcal{E}\\) and decoder \\(\\mathcal{D}\\) from Sec. 3.1. Related prior work [14, 28] for depth prediction simply repurposes a pre-trained image encoder-decoder without modification. We found this to work well for disparity and ray maps, but not for point maps. Hence, for the point maps only, we fine-tune the pre-trained decoder \\(\\mathcal{D}\\) using the following objective function [100]:" + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.666, + 0.905, + 0.715 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = - \\sum_ {u v} \\ln \\frac {1}{\\sqrt {2} \\sigma_ {u v}} \\exp - \\frac {\\sqrt {2} \\ell_ {1} (\\mathcal {D} (\\mathcal {E} (\\boldsymbol {X})) _ {u v} , \\boldsymbol {X} _ {u v})}{\\sigma_ {u v}}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.716, + 0.905, + 0.807 + ], + "angle": 0, + "content": "where \\(\\sigma \\in \\mathbb{R}^{H\\times W}\\) is the uncertainty of the reconstructed point map, which is also predicted by an additional branch of our VAE decoder. 
We leave the encoder \\(\\mathcal{E}\\) unchanged to modify the latent space as little as possible; instead, we normalize the point maps to the range \\([-1,1]\\) to make them more compatible with the pre-trained image encoder." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.81, + 0.905, + 0.87 + ], + "angle": 0, + "content": "Video conditioning. The original video diffusion model is conditioned on a single image, but here we need to condition it on the entire input video \\(\\mathcal{I} = \\{I^i\\}_{i=1}^N\\). To this end, we use a hybrid conditioning mechanism with two streams." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.905, + 0.902 + ], + "angle": 0, + "content": "As shown in Fig. 2, in one stream, we extract a global representation of each frame \\( \\pmb{I}^i \\) by passing it to" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.199 + ], + "angle": 0, + "content": "CLIP [64] followed by a lightweight learnable query transformer [1]. These vectors are incorporated in the transformer via cross-attention layers injected in each U-Net block. In the other stream, we extract local spatial features from the VAE encoder and concatenate them channel-wise to the noised latents, encoding the generated 4D modalities \\(\\{(D^i,X^i,r^i)\\}_{i = 1}^N\\)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.208, + 0.313, + 0.224 + ], + "angle": 0, + "content": "3.3. Multi-Modal Alignment" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.23, + 0.484, + 0.366 + ], + "angle": 0, + "content": "As noted, Geo4D predicts several non-independent geometric modalities. Furthermore, processing all frames of a long monocular video simultaneously with a video diffusion model is computationally prohibitive. 
Therefore, during inference, we use a temporal sliding window that segments the video into multiple overlapping clips, with partial overlap to facilitate joining them. The goal of this section is to fuse the resulting multi-modal and multi-window data into a single, coherent reconstruction of the entire video." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.369, + 0.484, + 0.476 + ], + "angle": 0, + "content": "Temporal sliding window. Given a video \\(\\mathcal{I} = \\{\\pmb{I}^i\\}_{i=1}^N\\) with \\(N\\) frames, we divide it into several video clips \\(\\mathcal{G} = \\{g^k\\}\\), \\(k \\in S\\), where each clip \\(g^k\\) contains \\(V\\) frames \\(\\{I^i\\}_{i=k}^{k+V-1}\\), and the set of starting indices is \\(\\mathcal{S} = \\{0, s, 2s, \\ldots, \\left\\lfloor \\frac{N-V}{s} \\right\\rfloor s\\} \\cup \\{N-V\\}\\). Here, \\(s\\) is the sliding window stride. The final term \\(\\{N-V\\}\\) ensures that the last clip always includes the final frames of the video." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.48, + 0.484, + 0.601 + ], + "angle": 0, + "content": "Alignment objectives. First, given the predicted point maps \\( X^{i,g} \\) for each frame \\( i \\) in each video clip \\( g \\in \\mathcal{G} \\), we derive corresponding globally aligned point maps in world coordinates, as well as the relative camera motion and scale parameters. We denote these quantities with the p subscript to emphasize that they are inferred from the point map predictions. 
To do so, we extend the pairwise global alignment loss from DUSt3R to a group-wise one:" + }, + { + "type": "equation", + "bbox": [ + 0.101, + 0.611, + 0.482, + 0.663 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {p}} \\left(\\boldsymbol {X}, \\lambda_ {\\mathrm {p}} ^ {g}, \\boldsymbol {P} _ {\\mathrm {p}} ^ {g}\\right) = \\sum_ {g \\in \\mathcal {G}} \\sum_ {i \\in g} \\sum_ {u v} \\left\\| \\frac {\\boldsymbol {X} _ {u v} ^ {i} - \\lambda_ {\\mathrm {p}} ^ {g} \\boldsymbol {P} _ {\\mathrm {p}} ^ {g} \\boldsymbol {X} _ {u v} ^ {i , g}}{\\boldsymbol {\\sigma} _ {u v} ^ {i , g}} \\right\\| _ {1}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.664, + 0.484, + 0.852 + ], + "angle": 0, + "content": "where \\(\\lambda_{\\mathrm{p}}^{g}\\) and \\(P_{\\mathrm{p}}^{g} = [\\mathbf{R}_{\\mathrm{p}}^{g}|\\beta_{\\mathrm{p}}^{g}]\\) denote the group-wise scale and transformation matrix that align the group-relative point maps \\(X^{i,g}\\) to the point maps \\(X^i\\) expressed in the global reference frame. \\(\\sigma_{uv}^{i,g}\\) denotes the uncertainty of the point map for frame \\(i\\) in group \\(g\\) at pixel \\((u,v)\\). We further parameterize each of these point maps as \\(X_{uv}^{i} = \\mathbf{R}_{\\mathrm{p}}^{i^{\\top}}\\mathbf{K}_{\\mathrm{p}}^{i^{-1}}D_{\\mathrm{p},uv}^{i^{-1}}(u,v,1) + o_{\\mathrm{p}}^{i}\\) in terms of each camera's calibration \\(\\mathbf{K}_{\\mathrm{p}}^{i}\\), world-to-camera rotation \\(\\mathbf{R}_{\\mathrm{p}}^{i}\\), and center \\(o_{\\mathrm{p}}^{i}\\) expressed in the global reference frame, and the disparity map \\(D_{\\mathrm{p}}^{i}\\). Substituting this expression into the loss function (4) and minimizing it, we can thus recover \\(\\mathbf{K}_{\\mathrm{p}}^{i},\\mathbf{R}_{\\mathrm{p}}^{i},o_{\\mathrm{p}}^{i}, D_{\\mathrm{p}}^{i},\\lambda_{\\mathrm{p}}^{g},P_{\\mathrm{p}}^{g}\\) from the predicted point maps." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.854, + 0.484, + 0.901 + ], + "angle": 0, + "content": "The steps above infer the disparity maps \\( D_{\\mathrm{p}}^{i} \\) from the point maps, but the model also predicts disparity maps \\( D_{\\mathrm{d}}^{i} \\) directly, where the d subscript denotes disparity prediction." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.092, + 0.822, + 0.107 + ], + "angle": 0, + "content": "We introduce the following loss to align them:" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.114, + 0.907, + 0.149 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {d}} \\left(\\boldsymbol {D} _ {\\mathrm {p}}, \\lambda_ {\\mathrm {d}} ^ {g}, \\beta_ {\\mathrm {d}} ^ {g}\\right) = \\sum_ {g \\in \\mathcal {G}} \\sum_ {i \\in g} \\left\\| \\boldsymbol {D} _ {\\mathrm {p}} ^ {i} - \\lambda_ {\\mathrm {d}} ^ {g} \\boldsymbol {D} _ {d} ^ {i, g} - \\beta_ {\\mathrm {d}} ^ {g} \\right\\| _ {1}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.157, + 0.899, + 0.173 + ], + "angle": 0, + "content": "where \\(\\lambda_{\\mathrm{d}}^{g}\\) and \\(\\beta_{\\mathrm{d}}^{g}\\) are optimized scale and shift parameters." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.173, + 0.907, + 0.293 + ], + "angle": 0, + "content": "Finally, the ray maps \\(\\pmb{r}\\) also encode camera pose. To align them with the global camera parameters \\((\\mathbf{R}_{\\mathrm{p}},\\mathbf{K}_{\\mathrm{p}},\\boldsymbol{o}_{\\mathrm{p}})\\) obtained from the point map, we first solve an optimization problem to extract the camera parameters from the ray map \\(\\pmb{r}^{i,g} = \\langle \\pmb{d}^{i,g},\\pmb{m}^{i,g}\\rangle\\) for each group \\(g\\) at frame \\(i\\). 
Following Ray Diffusion [114], the camera center \\(\\pmb{o}_{\\mathrm{c}}^{i,g}\\) is solved by finding the 3D world coordinate closest to the intersection of all rays:" + }, + { + "type": "equation", + "bbox": [ + 0.555, + 0.301, + 0.907, + 0.333 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {o} _ {\\mathrm {c}} ^ {i, g} = \\arg \\min _ {\\boldsymbol {p} \\in \\mathbb {R} ^ {3}} \\sum_ {u \\in H, v \\in W} \\| \\boldsymbol {p} \\times \\boldsymbol {d} _ {u v} ^ {i, g} - \\boldsymbol {m} _ {u v} ^ {i, g} \\| ^ {2}. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.341, + 0.906, + 0.387 + ], + "angle": 0, + "content": "The camera extrinsics are solved by optimizing for the matrix \\(\\mathbf{H}\\) that transforms the predicted per-pixel ray directions \\(d_{uv}^{i,g}\\) to the ray directions \\(\\mathbf{u}_{uv}\\) of a canonical camera:" + }, + { + "type": "equation", + "bbox": [ + 0.565, + 0.395, + 0.907, + 0.427 + ], + "angle": 0, + "content": "\\[\n\\mathbf {H} ^ {i, g} = \\underset {\\| \\mathbf {H} \\| = 1} {\\arg \\min } \\sum_ {u \\in H, v \\in W} \\left\\| \\mathbf {H} d _ {u v} ^ {i, g} \\times \\mathbf {u} _ {u v} \\right\\|. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.437, + 0.907, + 0.483 + ], + "angle": 0, + "content": "Then the world-to-camera rotation matrix \\(\\mathbf{R}_c^{i,g}\\) and intrinsic matrix \\(\\mathbf{K}_c^{i,g}\\) can be solved using the RQ-decomposition of \\(\\mathbf{H}^{i,g}\\). 
Finally, the camera trajectory alignment loss is:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.489, + 0.905, + 0.568 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {c}} \\left(\\mathbf {R} _ {\\mathrm {p}}, \\boldsymbol {o} _ {\\mathrm {p}}, \\mathbf {R} _ {\\mathrm {c}} ^ {g}, \\beta_ {\\mathrm {c}} ^ {g}, \\lambda_ {\\mathrm {c}} ^ {g}\\right) = \\sum_ {g \\in \\mathcal {G}} \\sum_ {i \\in g} \\left(\\left\\| \\mathbf {R} _ {\\mathrm {p}} ^ {i ^ {\\top}} \\mathbf {R} _ {\\mathrm {c}} ^ {g} \\mathbf {R} _ {\\mathrm {c}} ^ {i, g} - \\boldsymbol {I} \\right\\| _ {\\mathrm {f}} \\right. \\\\ \\left. + \\left\\| \\lambda_ {\\mathrm {c}} ^ {g} \\boldsymbol {o} _ {\\mathrm {c}} ^ {i, g} + \\beta_ {\\mathrm {c}} ^ {g} - \\boldsymbol {o} _ {\\mathrm {p}} ^ {i} \\right\\| _ {2}\\right), \\tag {8} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.576, + 0.906, + 0.653 + ], + "angle": 0, + "content": "where \\(R_{\\mathrm{c}}^{g}, \\beta_{\\mathrm{c}}^{g}, \\lambda_{\\mathrm{c}}^{g}\\) are learnable group-wise rotation matrix, translation vector, and scale, respectively, to align the global camera trajectory \\((\\mathbf{R}_p, \\mathbf{o}_p)\\) and the predicted ones \\((\\mathbf{R}_c, \\mathbf{o}_c)\\). Following MonST3R [113], we also use a loss to smooth the camera trajectory:" + }, + { + "type": "equation", + "bbox": [ + 0.517, + 0.66, + 0.905, + 0.713 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {s}} \\left(\\mathbf {R} _ {\\mathrm {p}}, \\boldsymbol {o} _ {p}\\right) = \\sum_ {i = 1} ^ {N} \\left(\\left\\| \\mathbf {R} _ {\\mathrm {p}} ^ {i ^ {\\top}} \\mathbf {R} _ {\\mathrm {p}} ^ {i + 1} - \\boldsymbol {I} \\right\\| _ {\\mathrm {f}} + \\left\\| \\boldsymbol {o} _ {\\mathrm {p}} ^ {i + 1} - \\boldsymbol {o} _ {\\mathrm {p}} ^ {i} \\right\\| _ {2}\\right). 
\\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.714, + 0.906, + 0.743 + ], + "angle": 0, + "content": "The final optimization objective is the weighted combination of the losses above:" + }, + { + "type": "equation", + "bbox": [ + 0.583, + 0.753, + 0.905, + 0.77 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {a l l}} = \\alpha_ {1} \\mathcal {L} _ {\\mathrm {p}} + \\alpha_ {2} \\mathcal {L} _ {\\mathrm {d}} + \\alpha_ {3} \\mathcal {L} _ {\\mathrm {c}} + \\alpha_ {4} \\mathcal {L} _ {\\mathrm {s}}. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.907, + 0.901 + ], + "angle": 0, + "content": "A note on the invariants. The model predicts point maps, disparity maps, and ray map origins up to scale, as this cannot be uniquely determined from a monocular video. The disparity map is also recovered up to a translation, which discounts the focal length (this is sometimes difficult to estimate due to the dolly zoom effect). Likewise, the ray map origin is recovered up to a shift, necessary to allow normalizing these maps." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.107, + 0.089, + 0.891, + 0.276 + ], + "angle": 0, + "content": "
CategoryMethodSintel [5]Bonn [55]KITTI [17]
Abs Rel ↓δ < 1.25 ↑Abs Rel ↓δ < 1.25 ↑Abs Rel ↓δ < 1.25 ↑
Single-frame depthMarigold [28]0.53251.50.09193.10.14979.6
Depth-Anything-V2 [106]0.36755.40.10692.10.14080.4
Video depthNVDS [95]0.40848.30.16776.60.25358.8
ChronoDepth [70]0.68748.60.10091.10.16775.9
DepthCrafter* [22]0.27069.70.07197.20.10489.6
Video depth & Camera poseRobust-CVD [32]0.70347.8
CasualSAM [116]0.38754.70.16973.70.24662.2
MonST3R [113]0.33558.50.06396.40.10489.5
Geo4D (Ours)0.20573.50.05997.20.08693.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.277, + 0.908, + 0.319 + ], + "angle": 0, + "content": "Table 1. Video depth estimation on Sintel [5], Bonn [55] and KITTI [17] datasets. We follow the evaluation protocols established in recent MonST3R [113] for a fair comparison. Notably, results for DepthCrafter* are reported from its latest version (v1.0.1). The Best and the second best results are highlighted." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.326, + 0.226, + 0.344 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.351, + 0.3, + 0.369 + ], + "angle": 0, + "content": "4.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.374, + 0.484, + 0.466 + ], + "angle": 0, + "content": "Training datasets. Geo4D is trained exclusively on synthetic datasets, yet demonstrates strong generalization to real-world videos. Specifically, we use five synthetic datasets for training: Spring [50], BEDLAM [2], PointOdyssey [119], TarTanAir [93], and VirtualKitti [6]. See the Supp. Mat Tab. 5 for details." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.469, + 0.484, + 0.666 + ], + "angle": 0, + "content": "Training. Our Geo4D is initialized with the weights of DynamiCrafter [102] and trained using AdamW [48] with a learning rate of \\( 1 \\times 10^{-5} \\) and a batch size of 32. We use a progressive training strategy to improve convergence and stability. First, we train the model to generate a single geometric modality, i.e., the point maps, at a fixed resolution of \\( 512 \\times 320 \\). Next, we introduce a multi-resolution training scheme to improve generalization and robustness, which includes various resolutions: \\( 512 \\times 384 \\), \\( 512 \\times 320 \\), \\( 576 \\times 256 \\), \\( 640 \\times 192 \\). Finally, we progressively add additional geometric modalities, i.e., the ray and depth maps. 
Training is conducted on 4 NVIDIA H100 GPUs with a total training time of approximately one week." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.669, + 0.484, + 0.805 + ], + "angle": 0, + "content": "Inference. As described in Sec. 3.2, given an \\(N\\)-frame video as input, we first split it into overlapping clips \\(\\mathcal{G}\\), each containing \\(V = 16\\) frames, with a stride of \\(s = 4\\). Each video clip is encoded and fed to the diffusion model to sample multi-modal 4D parameters \\((X^{i,g}, D^{i,g}, r^{i,g})\\) for the video. For sampling, we use DDIM [77] with 5 steps. Finally, the alignment algorithm in Sec. 3.2 is used to fuse the clips into a globally coherent 4D reconstruction of the entire video." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.818, + 0.313, + 0.835 + ], + "angle": 0, + "content": "4.2. Video Depth Estimation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.485, + 0.903 + ], + "angle": 0, + "content": "Testing data. Our hypothesis is that, despite being trained on synthetic data, our model can generalize well to out-of-distribution synthetic and real data, as it is based on a pre-trained video diffusion model. To test this hypothe" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.327, + 0.907, + 0.465 + ], + "angle": 0, + "content": "sis, we evaluate our model on three benchmarks: Sintel [5] is a synthetic dataset that provides accurate depth annotations, covering diverse scenes with complex camera motion. KITTI [17] is a large driving dataset collected using stereo cameras and LiDAR sensors. Bonn [55] focuses on dynamic indoor scenes. To ensure fair comparisons, we follow the evaluation protocol used by MonST3R [113], where depth sequences are uniformly sampled from the datasets, extracting 50-110 frames per sequence for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.473, + 0.909, + 0.626 + ], + "angle": 0, + "content": "Metrics. 
Following the standard affine-invariant depth evaluation protocol [65], we align the predicted video depth with the ground-truth depth before computing metrics. However, unlike single-image depth estimation [28, 105, 106], where depth alignment is performed per frame, we enforce global scale consistency by applying a single scale and shift across the entire video sequence. For quantitative evaluation, we adopt two widely used depth metrics: absolute relative error (Abs Rel) and the percentage of inlier points (with a threshold value of \\(\\delta < 1.25\\))." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.634, + 0.909, + 0.727 + ], + "angle": 0, + "content": "Baselines. We compare Geo4D to state-of-the-art single-frame depth estimation methods (Marigold [28] and Depth-Anything-V2 [106]), video depth prediction (NVDS [95], ChronoDepth [70], and DepthCrafter [22]), and joint video depth and camera pose prediction (Robust-CVD [32], CausalSAM [116], and MonST3R [113])." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.911, + 0.903 + ], + "angle": 0, + "content": "Results. As shown in Table 1, all versions of Geo4D outperform state-of-the-art methods by a large margin. This includes DepthCrafter [22] and MonST3R [113], the most recent video depth diffusion model and the dynamic extension of DUSt3R to dynamic scenes, respectively. Notably, while both Geo4D and DepthCrafter are based on the same video diffusion model (DynamiCrafter), our model outperforms DepthCrafter in Abs Rel by \\(24.0\\%\\) on Sintel and \\(17.3\\%\\) on KITTI, despite solving a more general problem. Qualitatively, Fig. 3 shows that Geo4D achieves more consistent results, especially for fast-moving objects." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.506, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.096, + 0.201, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.097, + 0.333, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.095, + 0.554, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.56, + 0.098, + 0.67, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.097, + 0.904, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.455, + 0.907, + 0.497 + ], + "angle": 0, + "content": "Figure 3. Qualitative results comparing Geo4D with MonST3R [113]. Attributed to our group-wise inference manner and prior geometry knowledge from pretrained video diffusion, our model successfully produces consistent 4D geometry under fast motion (first row) and deceptive reflection in the water (second row)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.508, + 0.317, + 0.523 + ], + "angle": 0, + "content": "4.3. Camera Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.531, + 0.483, + 0.743 + ], + "angle": 0, + "content": "Setup. We evaluate the performance of Geo4D on both the synthetic Sintel [5] dataset and the realistic TUM-dynamics [78] dataset. We follow the same evaluation protocol as in MonST3R [113]. Specifically, on Sintel, we select 14 dynamic sequences, and for TUM-dynamics, we sample the first 90 frames of each sequence with a temporal stride of 3. After aligning the predicted camera trajectory with the ground truth using the Umayama algorithm, we calculate three commonly used metrics: Absolute Translation Error (ATE), Relative Translation Error (RPE-T), and Relative Rotation Error (RPE-R). 
We compare our method with other state-of-the-art discriminative methods, which jointly predict camera pose and depth, including Robust-CVD [32], CausalSAM [116], and MonST3R [113]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.746, + 0.483, + 0.837 + ], + "angle": 0, + "content": "Results. To the best of our knowledge, Geo4D is the first method that uses a generative model to estimate camera parameters in a dynamic scene. As shown in Tab. 2, compared to existing non-generative alternatives, we achieve much better camera rotation prediction (RPE-R) and comparable camera translation prediction (ATE and RPE-T)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.848, + 0.313, + 0.865 + ], + "angle": 0, + "content": "4.4. Qualitative Comparison" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.483, + 0.902 + ], + "angle": 0, + "content": "4D reconstruction. We compare Geo4D with the state-of-the-art MonST3R method on the DAVIS [23] dataset. Up-" + }, + { + "type": "table", + "bbox": [ + 0.514, + 0.507, + 0.905, + 0.595 + ], + "angle": 0, + "content": "
MethodSintelTUM-dynamics
ATE ↓RPE-T ↓RPE-R ↓ATE ↓RPE-T ↓RPE-R ↓
Robust-CVD [32]0.3600.1543.4430.1530.0263.528
CasualSAM [116]0.1410.0350.6150.0710.0101.712
MonST3R [113]0.1080.0420.7320.0630.0091.217
Geo4D (Ours)0.1850.0630.5470.0730.0200.635
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.596, + 0.907, + 0.637 + ], + "angle": 0, + "content": "Table 2. Quantitative evaluation for camera pose estimation. We achieve comparable camera pose estimation performance with other discriminative SOTA methods." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.653, + 0.907, + 0.835 + ], + "angle": 0, + "content": "grading from pairwise alignment as in MonST3R to our group-wise alignment improves temporal consistency, leading to a more stable and globally coherent 4D reconstruction of point maps and camera trajectory, particularly in highly dynamic scenes. As shown in the top row of Fig. 3, Geo4D successfully tracks the racing car in 4D, whereas MonST3R struggles due to the rapid motion between pairs of images. Furthermore, likely due to the strong prior captured by the pre-trained video generative model, Geo4D correctly reconstructs the reflection of the flamingo in the water (second row in Fig. 3), whereas MonST3R misinterprets the reflection as a foreground object, resulting in incorrect depth." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Video depth prediction. We compare Geo4D with state-of-the-art video depth predictors MonST3R [113] and DepthCrafter [22] on the Sintel [5] dataset. Qualitatively, Geo4D produces more detailed geometry, for instance for" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.089, + 0.907, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.218, + 0.907, + 0.259 + ], + "angle": 0, + "content": "Figure 4. Qualitative video depth results comparing Geo4D with MonST3R [113] and DepthCrafter [22]. 
Owing to our proposed multimodal training and alignment, as well as the prior knowledge from diffusion, our method can infer a more detailed structure (first row) and a more accurate spatial arrangement from video (second row)." + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.26, + 0.905, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.369, + 0.907, + 0.412 + ], + "angle": 0, + "content": "Table 3. Ablation study for the different modalities of the geometric representation on the Sintel [5] dataset. We demonstrate the effectiveness of our key design choices that both leverage multi-modality as additional training supervision signal and postprocess through our proposed multi-modal alignment algorithm will improve the overall performance." + }, + { + "type": "table", + "bbox": [ + 0.092, + 0.418, + 0.481, + 0.498 + ], + "angle": 0, + "content": "
Strides / frameVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
150.920.21372.40.2100.0920.574
81.240.21272.80.2220.0740.524
41.890.20573.50.1850.0630.547
23.260.20472.90.1810.0580.518
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.5, + 0.483, + 0.542 + ], + "angle": 0, + "content": "Table 4. Ablation study for the temporal sliding window stride on the Sintel [5] dataset. There is a trade-off between performance and inference speed." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.551, + 0.484, + 0.598 + ], + "angle": 0, + "content": "the rope on the stick in the first row of Fig. 4, and a better spatial arrangement between different dynamic objects, as shown in the second row of Fig. 4." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.607, + 0.245, + 0.623 + ], + "angle": 0, + "content": "4.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.629, + 0.483, + 0.658 + ], + "angle": 0, + "content": "We ablate our key design choices and the effect of different modalities on the Sintel dataset." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.659, + 0.483, + 0.81 + ], + "angle": 0, + "content": "We study the effect of multi-modality in Tab. 3. The three modalities—point map, disparity map, and ray map—can be used either at training or inference time, or both. The first two rows show that the diffusion model trained with point maps as a single modality performs worse in both video depth and camera pose estimation than the diffusion model trained with all three modalities. Therefore, the other two modalities, even if they can be seen as redundant, serve as additional supervisory signals during training, which improves the generalization ability of the diffusion model." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.484, + 0.901 + ], + "angle": 0, + "content": "We then investigate the effectiveness of our multi-modal alignment algorithm. Compared with the second to the fourth row in Tab. 3, which leverage only a single modality during inference, multi-modal alignment optimization (last row) achieves the best performance, showing the benefits of fusing the multiple modalities at inference time." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.42, + 0.907, + 0.556 + ], + "angle": 0, + "content": "We ablate the sliding window stride in Tab. 4. Results improve with a shorter stride, in part because this means that more windows and estimates are averaged, reducing the variance of the predictions by the denoising diffusion model, which is stochastic. We choose stride \\( s = 4 \\) for our main results to balance runtime and performance. Note that MonST3R [113] requires 2.41 seconds to process one frame under the same setting, so our method is 1.27 times faster than MonST3R [113]." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.57, + 0.763, + 0.587 + ], + "angle": 0, + "content": "5. Discussion and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.595, + 0.906, + 0.732 + ], + "angle": 0, + "content": "We have introduced Geo4D, a novel approach that adapts a video generator for dynamic 4D reconstruction. By building on a pre-trained video generator, Geo4D achieves excellent generalization to real data despite being trained only on synthetic 4D data. We have also demonstrated the benefits of predicting multiple modalities and fusing them at test time via optimization. Our model outperforms state-of-the-art methods on video depth and camera rotation prediction, particularly in challenging dynamic scenes." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.732, + 0.905, + 0.793 + ], + "angle": 0, + "content": "Despite these successes, our approach has limitations. One is that the point map encoder-decoder is still not entirely accurate, which in turn is a bottleneck for the overall reconstruction quality." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.792, + 0.905, + 0.853 + ], + "angle": 0, + "content": "Our approach also opens a path to integrating 4D geometry into video foundation models, e.g., to generate 3D animations from text, or to provide a more actionable signal when the video model is used as a proxy for a world model." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Acknowledgments. The authors of this work were supported by Clarendon Scholarship, ERC 101001212-UNION, and EPSRC EP/Z001811/1 SYN3D." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.188, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.115, + 0.484, + 0.212 + ], + "angle": 0, + "content": "[1] Anas Awadalla, Irena Gao, Josh Gardner, Jack Hessel, Yusuf Hanafy, Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Shiori Sagawa, Jenia Jitsev, Simon Kornblith, Pang Wei Koh, Gabriel Ilharco, Mitchell Wortman, and Ludwig Schmidt. Openflamingo: An opensource framework for training large autoregressive vision-language models, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.213, + 0.484, + 0.255 + ], + "angle": 0, + "content": "[2] Michael J. Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. Bedlam: A synthetic dataset of bodies exhibiting detailed lifelike animated motion, 2023. 6, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.256, + 0.483, + 0.325 + ], + "angle": 0, + "content": "[3] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.326, + 0.484, + 0.409 + ], + "angle": 0, + "content": "[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 22563-22575, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.41, + 0.483, + 0.465 + ], + "angle": 0, + "content": "[5] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. Black. A naturalistic open source movie for optical flow evaluation. In European Conference on Computer Vision (ECCV), 2012. 6, 7, 8, 15, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.467, + 0.482, + 0.493 + ], + "angle": 0, + "content": "[6] Yohann Cabon, Naila Murray, and Martin Humenberger. Virtual kitti 2, 2020. 6, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.495, + 0.483, + 0.577 + ], + "angle": 0, + "content": "[7] David Charatan, Sizhe Lester Li, Andrea Tagliasacchi, and Vincent Sitzmann. pixelsplat: 3d gaussian splats from image pairs for scalable generalizable 3d reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19457-19467, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.579, + 0.483, + 0.633 + ], + "angle": 0, + "content": "[8] Xingyu Chen, Yue Chen, Yuliang Xiu, Andreas Geiger, and Anpei Chen. Easi3r: Estimating disentangled motion from dust3r without training. arXiv preprint arXiv:2503.24391, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.635, + 0.483, + 0.702 + ], + "angle": 0, + "content": "[9] Yuedong Chen, Haofei Xu, Chuanxia Zheng, Bohan Zhuang, Marc Pollefeys, Andreas Geiger, Tat-Jen Cham, and Jianfei Cai. MVSplat: efficient 3d gaussian splattering from sparse multi-view images. arXiv, 2403.14627, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.705, + 0.483, + 0.773 + ], + "angle": 0, + "content": "[10] Yuedong Chen, Chuanxia Zheng, Haofei Xu, Bohan Zhuang, Andrea Vedaldi, Tat-Jen Cham, and Jianfei Cai. Mvsplat360: Feed-forward 360 scene synthesis from sparse views. 
In Neural Information Processing Systems (NeurIPS), 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.775, + 0.483, + 0.844 + ], + "angle": 0, + "content": "[11] Christopher B Choy, Danfei Xu, JunYoung Gwak, Kevin Chen, and Silvio Savarese. 3d-r2n2: A unified approach for single and multi-view 3d object reconstruction. In European conference on computer vision (ECCV), pages 628-644. Springer, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.846, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[12] Wen-Hsuan Chu, Lei Ke, and Katerina Fragkiadaki. Dreamscene4d: Dynamic multi-object scene generation from monocular videos. Advances in Neural Information Processing Systems (NeurIPS), 2024. 2" + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.115, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.093, + 0.905, + 0.148 + ], + "angle": 0, + "content": "[13] Yiquan Duan, Xianda Guo, and Zheng Zhu. Diffusion-depth: Diffusion denoising approach for monocular depth estimation. In European Conference on Computer Vision (ECCV), pages 432-449. Springer, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.15, + 0.905, + 0.232 + ], + "angle": 0, + "content": "[14] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. In European Conference on Computer Vision (ECCV), pages 241-258. Springer, 2024. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.234, + 0.905, + 0.303 + ], + "angle": 0, + "content": "[15] Ruiqi Gao, Aleksander Holynski, Philipp Henzler, Arthur Brussee, Ricardo Martin-Brualla, Pratul Srinivasan, Jonathan T Barron, and Ben Poole. Cat3d: Create anything in 3d with multi-view diffusion models. Advances in Neural Information Processing Systems (NeurIPS), 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.305, + 0.905, + 0.388 + ], + "angle": 0, + "content": "[16] Songwei Ge, Seungjun Nah, Guilin Liu, Tyler Poon, Andrew Tao, Bryan Catanzaro, David Jacobs, Jia-Bin Huang, Ming-Yu Liu, and Yogesh Balaji. Preserve your own correlation: A noise prior for video diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 22930-22941, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.39, + 0.905, + 0.432 + ], + "angle": 0, + "content": "[17] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The KITTI dataset. International Journal of Robotics Research (IJRR), 2013. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.434, + 0.905, + 0.487 + ], + "angle": 0, + "content": "[18] Georgia Gkioxari, Jitendra Malik, and Justin Johnson. Mesh r-cnn. In Proceedings of the IEEE/CVF international conference on computer vision (CVPR), pages 9785-9795, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.49, + 0.905, + 0.572 + ], + "angle": 0, + "content": "[19] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. In International Conference on Learning Representations (ICLR), 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.575, + 0.905, + 0.63 + ], + "angle": 0, + "content": "[20] Junlin Han, Filippos Kokkinos, and Philip Torr. Vfusion3d: Learning scalable 3d generative models from video diffusion models. In European Conference on Computer Vision (ECCV), pages 333-350. Springer, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.632, + 0.905, + 0.686 + ], + "angle": 0, + "content": "[21] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. 
Neural Information Processing Systems (NeurIPS), 35:8633-8646, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.688, + 0.905, + 0.743 + ], + "angle": 0, + "content": "[22] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 2, 6, 7, 8, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.746, + 0.905, + 0.786 + ], + "angle": 0, + "content": "[23] Jia-Bin Huang, Sing Bing Kang, Narendra Ahuja, and Johannes Kopf. Temporally coherent completion of dynamic video. In ACM, 2016. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.789, + 0.905, + 0.857 + ], + "angle": 0, + "content": "[24] Matthias Innmann, Michael Zollhöfer, Matthias Nießner, Christian Theobalt, and Marc Stamminger. Volumedeform: Real-time volumetric non-rigid reconstruction. In European conference on computer vision (ECCV), pages 362-379. Springer, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.86, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[25] Tomas Jakab, Ruining Li, Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Farm3d: Learning articulated 3d animals by distilling 2d diffusion. In 2024 International" + }, + { + "type": "list", + "bbox": [ + 0.523, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.133, + 0.092, + 0.482, + 0.12 + ], + "angle": 0, + "content": "Conference on 3D Vision (3DV), pages 852-861. IEEE, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.123, + 0.483, + 0.178 + ], + "angle": 0, + "content": "[26] Yanqin Jiang, Chaohui Yu, Chenjie Cao, Fan Wang, Weiming Hu, and Jin Gao. *Animate3d: Animating any 3d model with multi-view video diffusion*. 
Advances in Neural Information Processing Systems (NeurIPS), 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.181, + 0.483, + 0.25 + ], + "angle": 0, + "content": "[27] Zeren Jiang, Chen Guo, Manuel Kaufmann, Tianjian Jiang, Julien Valentin, Otmar Hilliges, and Jie Song. Multiply: Reconstruction of multiple people from monocular video in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.253, + 0.482, + 0.336 + ], + "angle": 0, + "content": "[28] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9492-9502, 2024. 2, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.339, + 0.482, + 0.394 + ], + "angle": 0, + "content": "[29] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4):139-1, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.397, + 0.482, + 0.453 + ], + "angle": 0, + "content": "[30] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.455, + 0.482, + 0.523 + ], + "angle": 0, + "content": "[31] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.527, + 0.483, + 0.582 + ], + "angle": 0, + "content": "[32] Johannes Kopf, Xuejian Rong, and Jia-Bin Huang. Robust consistent video depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1611-1621, 2021. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.585, + 0.482, + 0.64 + ], + "angle": 0, + "content": "[33] Akshay Krishnan, Xinchen Yan, Vincent Casser, and Abhijit Kundu. Orchid: Image latent diffusion for joint appearance and geometry generation. arXiv preprint arXiv:2501.13087, 2025. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.643, + 0.482, + 0.698 + ], + "angle": 0, + "content": "[34] Jiahui Lei, Yijia Weng, Adam Harley, Leonidas Guibas, and Kostas Daniilidis. Mosca: Dynamic gaussian fusion from casual videos via 4d motion scaffolds. arXiv preprint arXiv:2405.17421, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.701, + 0.482, + 0.756 + ], + "angle": 0, + "content": "[35] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. In European Conference on Computer Vision, pages 71-91. Springer, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.76, + 0.482, + 0.842 + ], + "angle": 0, + "content": "[36] Jiahao Li, Hao Tan, Kai Zhang, Zexiang Xu, Fujun Luan, Yinghao Xu, Yicong Hong, Kalyan Sunkavalli, Greg Shakhnarovich, and Sai Bi. Instant3D: Fast text-to-3D with sparse-view generation and large reconstruction model. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.846, + 0.482, + 0.899 + ], + "angle": 0, + "content": "[37] Xuanyi Li, Daquan Zhou, Chenxu Zhang, Shaodong Wei, Qibin Hou, and Ming-Ming Cheng. Sora generates videos with stunning geometrical consistency. arXiv, 2402.17403, 2024. 
2" + }, + { + "type": "list", + "bbox": [ + 0.101, + 0.092, + 0.483, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.093, + 0.905, + 0.162 + ], + "angle": 0, + "content": "[38] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.164, + 0.905, + 0.233 + ], + "angle": 0, + "content": "[39] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4273-4284, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.235, + 0.905, + 0.317 + ], + "angle": 0, + "content": "[40] Zhengqi Li, Richard Tucker, Forrester Cole, Qianqian Wang, Linyi Jin, Vickie Ye, Angjoo Kanazawa, Aleksander Holynski, and Noah Snavely. Megasam: Accurate, fast and robust structure and motion from casual dynamic videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.319, + 0.905, + 0.375 + ], + "angle": 0, + "content": "[41] Chen-Hsuan Lin, Chen Kong, and Simon Lucey. Learning efficient point cloud generation for dense 3d object reconstruction. In proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.377, + 0.905, + 0.459 + ], + "angle": 0, + "content": "[42] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 300–309, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.462, + 0.904, + 0.502 + ], + "angle": 0, + "content": "[43] Youtian Lin, Zuozhuo Dai, Siyu Zhu, and Yao Yao. Gaussian-flow: 4d reconstruction with dynamic 3d gaussian particle. In CVPR, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.504, + 0.905, + 0.559 + ], + "angle": 0, + "content": "[44] Fangfu Liu, Wenqiang Sun, Hanyang Wang, Yikai Wang, Haowen Sun, Junliang Ye, Jun Zhang, and Yueqi Duan. Reconx: Reconstruct any scene from sparse views with video diffusion model, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.562, + 0.907, + 0.631 + ], + "angle": 0, + "content": "[45] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), pages 9298–9309, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.633, + 0.905, + 0.701 + ], + "angle": 0, + "content": "[46] Yuan Liu, Cheng Lin, Zijiao Zeng, Xiaoxiao Long, Lingjie Liu, Taku Komura, and Wenping Wang. Syncdreamer: Generating multiview-consistent images from a single-view image. In The Twelfth International Conference on Learning Representations (ICLR), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.704, + 0.905, + 0.799 + ], + "angle": 0, + "content": "[47] Xiaoxiao Long, Yuan-Chen Guo, Cheng Lin, Yuan Liu, Zhiyang Dou, Lingjie Liu, Yuexin Ma, Song-Hai Zhang, Marc Habermann, Christian Theobalt, et al. Wonder3d: Single image to 3d using cross-domain diffusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 9970-9980, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.802, + 0.905, + 0.83 + ], + "angle": 0, + "content": "[48] Ilya Loshchilov and Frank Hutter. 
Decoupled weight decay regularization. In ICLR, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.832, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[49] Yuanxun Lu, Jingyang Zhang, Tian Fang, Jean-Daniel Nahmias, Yanghai Tsin, Long Quan, Xun Cao, Yao Yao, and Shiwei Li. Matrix3d: Large photogrammetry model all-in-one. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3, 4" + }, + { + "type": "list", + "bbox": [ + 0.523, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.092, + 0.482, + 0.161 + ], + "angle": 0, + "content": "[50] Lukas Mehl, Jenny Schmalfuss, Azin Jahedi, Yaroslava Nalivayko, and Andres Bruhn. Spring: A high-resolution high-detail dataset and benchmark for scene flow, optical flow and stereo. In Proc. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 6, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.163, + 0.482, + 0.232 + ], + "angle": 0, + "content": "[51] Luke Melas-Kyriazi, Iro Laina, Christian Rupprecht, and Andrea Vedaldi. Realfusion: 360deg reconstruction of any object from a single image. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 8446-8455, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.233, + 0.483, + 0.288 + ], + "angle": 0, + "content": "[52] B Mildenhall, PP Srinivasan, M Tancik, JT Barron, R Ramamoorthi, and R Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.289, + 0.482, + 0.357 + ], + "angle": 0, + "content": "[53] Richard A Newcombe, Dieter Fox, and Steven M Seitz. 
Dynamicfusion: Reconstruction and tracking of non-rigid scenes in real-time. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 343-352, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.359, + 0.482, + 0.663 + ], + "angle": 0, + "content": "[54] NVIDIA, Niket Agarwal, Arslan Ali, Maciej Bala, Yogesh Balaji, Erik Barker, Tiffany Cai, Prithvijit Chattopadhyay, Yongxin Chen, Yin Cui, Yifan Ding, Daniel Dworakowski, Jiaojiao Fan, Michele Fenzi, Francesco Ferroni, Sanja Fidler, Dieter Fox, Songwei Ge, Yunhao Ge, Jinwei Gu, Siddharth Gururani, Ethan He, Jiahui Huang, Jacob Huffman, Pooya Jannaty, Jingyi Jin, Seung Wook Kim, Gergely Klár, Grace Lam, Shiyi Lan, Laura Leal-Taixe, Anqi Li, Zhaoshuo Li, Chen-Hsuan Lin, Tsung-Yi Lin, Huan Ling, Ming-Yu Liu, Xian Liu, Alice Luo, Qianli Ma, Hanzi Mao, Kaichun Mo, Arsalan Mousavian, Seungjun Nah, Sriharsha Niverty, David Page, Despoina Paschalidou, Zeeshan Patel, Lindsey Pavao, Morteza Ramezanali, Fitsum Reda, Xiaowei Ren, Vasanth Rao Naik Sabavat, Ed Schmerling, Stella Shi, Bartosz Stefaniak, Shitao Tang, Lyne Tchapmi, Przemek Tredak, Wei-Cheng Tseng, Jibin Varghese, Hao Wang, Haoxiang Wang, Heng Wang, Ting-Chun Wang, Fangyin Wei, Xinyue Wei, Jay Zhangjie Wu, Jiashu Xu, Wei Yang, Lin Yen-Chen, Xiaohui Zeng, Yu Zeng, Jing Zhang, Qinsheng Zhang, Yuxuan Zhang, Qingqing Zhao and Artur Zolkowski. Cosmos world foundation model platform for physical ai. arXiv, 2501.03575, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.664, + 0.482, + 0.746 + ], + "angle": 0, + "content": "[55] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguère, and C. Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 7855-7862, 2019. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.748, + 0.482, + 0.829 + ], + "angle": 0, + "content": "[56] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.832, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[57] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.101, + 0.092, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.093, + 0.905, + 0.175 + ], + "angle": 0, + "content": "[58] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: a higher-dimensional representation for topologically varying neural radiance fields. ACM Transactions on Graphics (TOG), 40 (6):1-12, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.178, + 0.905, + 0.316 + ], + "angle": 0, + "content": "[59] Jack Parker-Holder, Philip Ball, Jake Bruce, Vibhavari Dasagi, Kristian Holsheimer, Christos Kaplanis, Alexandre Moufarek, Guy Scully, Jeremy Shar, Jimmy Shi, Stephen Spencer, Jessica Yung, Michael Dennis, Sultan Kenjoyev, Shangbang Long, Vlad Mnih, Harris Chan, Maxime Gazeau, Bonnie Li, Fabio Pardo, Luyu Wang, Lei Zhang, Frederic Besse, Tim Harley, Anna Mitenkova, Jane Wang, Jeff Clune, Demis Hassabis, Raia Hadsell, Adrian Bolton, Satinder Singh, and Tim Rocktäschel. Genie 2: A large-scale foundation world model, 2024. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.318, + 0.905, + 0.374 + ], + "angle": 0, + "content": "[60] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European conference on computer vision (ECCV), pages 523-540. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.376, + 0.905, + 0.431 + ], + "angle": 0, + "content": "[61] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.433, + 0.905, + 0.502 + ], + "angle": 0, + "content": "[62] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10318-10327, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.504, + 0.905, + 0.6 + ], + "angle": 0, + "content": "[63] Lingteng Qiu, Guanying Chen, Xiaodong Gu, Qi Zuo, Mutian Xu, Yushuang Wu, Weihao Yuan, Zilong Dong, Liefeng Bo, and Xiaoguang Han. Richdreamer: A generalizable normal-depth diffusion model for detail richness in text-to-3d. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 9914–9925, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.603, + 0.905, + 0.687 + ], + "angle": 0, + "content": "[64] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning (ICML), pages 8748-8763. PmLR, 2021. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.689, + 0.905, + 0.757 + ], + "angle": 0, + "content": "[65] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 44:1623-1637, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.76, + 0.905, + 0.829 + ], + "angle": 0, + "content": "[66] Jiawei Ren, Kevin Xie, Ashkan Mirzaei, Hanxue Liang, Xiaohui Zeng, Karsten Kreis, Ziwei Liu, Antonio Torralba, Sanja Fidler, Seung Wook Kim, and Huan Ling. L4gm: Large 4d gaussian reconstruction model. Advances in Neural Information Processing Systems (NeurIPS), 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.832, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[67] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 10684-10695, 2022. 4" + }, + { + "type": "list", + "bbox": [ + 0.523, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.092, + 0.486, + 0.189 + ], + "angle": 0, + "content": "[68] Kyle Sargent, Zizhang Li, Tanmay Shah, Charles Herrmann, Hong-Xing Yu, Yunzhi Zhang, Eric Ryan Chan, Dmitry Lagun, Li Fei-Fei, Deqing Sun, et al. Zeronvs: Zero-shot 360-degree view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9420–9429, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.191, + 0.486, + 0.274 + ], + "angle": 0, + "content": "[69] Saurabh Saxena, Charles Herrmann, Junhwa Hur, Abhishek Kar, Mohammad Norouzi, Deqing Sun, and David J Fleet. The surprising effectiveness of diffusion models for optical flow and monocular depth estimation. Advances in Neural Information Processing Systems (NeurIPS), 36:39443-39469, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.276, + 0.485, + 0.331 + ], + "angle": 0, + "content": "[70] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. arXiv, 2406.01493, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.333, + 0.484, + 0.389 + ], + "angle": 0, + "content": "[71] Yichun Shi, Peng Wang, Jianglong Ye, Long Mai, Kejie Li, and Xiao Yang. MVDream: Multi-view diffusion for 3d generation. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.39, + 0.485, + 0.473 + ], + "angle": 0, + "content": "[72] Yawar Siddiqui, Antonio Alliegro, Alexey Artemov, Tatiana Tommasi, Daniele Sirigatti, Vladislav Rosov, Angela Dai, and Matthias Nießner. Meshgpt: Generating triangle meshes with decoder-only transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19615-19625, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.475, + 0.485, + 0.558 + ], + "angle": 0, + "content": "[73] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, Devi Parikh, Sonal Gupta, and Yaniv Taigman. Make-a-video: Text-to-video generation without text-video data. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.56, + 0.485, + 0.641 + ], + "angle": 0, + "content": "[74] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhofer. Deepvoxels: Learning persistent 3d feature embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2437-2446, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.645, + 0.485, + 0.714 + ], + "angle": 0, + "content": "[75] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems (NeurIPS), 34:19313-19325, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.716, + 0.485, + 0.77 + ], + "angle": 0, + "content": "[76] Brandon Smart, Chuanxia Zheng, Iro Laina, and Victor Adrian Prisacariu. Splatt3r: Zero-shot gaussian splatting from uncalibrated image pairs. arXiv preprint arXiv:2408.13912, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.773, + 0.485, + 0.801 + ], + "angle": 0, + "content": "[77] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.802, + 0.485, + 0.87 + ], + "angle": 0, + "content": "[78] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of rgb-d slam systems. 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 573-580, 2012. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.873, + 0.485, + 0.901 + ], + "angle": 0, + "content": "[79] Stanislaw Szymanowicz, Christian Rupprecht, and Andrea Vedaldi. 
Splatter Image: Ultra-fast single-view 3D recon" + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.092, + 0.486, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.553, + 0.093, + 0.905, + 0.121 + ], + "angle": 0, + "content": "struction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.123, + 0.907, + 0.205 + ], + "angle": 0, + "content": "[80] Stanislaw Szymanowicz, Eldar Insafutdinov, Chuanxia Zheng, Dylan Campbell, João F. Henriques, Christian Rupprecht, and Andrea Vedaldi. Flash3D: Feed-forward generalisable 3D scene reconstruction from a single image. In Proceedings of the International Conference on 3D Vision (3DV), 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.207, + 0.907, + 0.275 + ], + "angle": 0, + "content": "[81] Stanislaw Szymanowicz, Jason Y Zhang, Pratul Srinivasan, Ruiqi Gao, Arthur Brussee, Aleksander Holynski, Ricardo Martin-Brualla, Jonathan T Barron, and Philipp Henzler. Bolt3d: Generating 3d scenes in seconds. arXiv preprint arXiv:2503.14445, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.278, + 0.906, + 0.346 + ], + "angle": 0, + "content": "[82] Aether Team, Haoyi Zhu, Yifan Wang, Jianjun Zhou, Wenzheng Chang, Yang Zhou, Zizun Li, Junyi Chen, Chunhua Shen, Jiangmiao Pang, and Tong He. Aether: Geometric-aware unified world modeling. arXiv preprint arXiv:2503.18945, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.349, + 0.906, + 0.417 + ], + "angle": 0, + "content": "[83] Shubham Tulsiani, Tinghui Zhou, Alexei A Efros, and Jitendra Malik. Multi-view supervision for single-view reconstruction via differentiable ray consistency. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 2626-2634, 2017. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.42, + 0.906, + 0.473 + ], + "angle": 0, + "content": "[84] S. Umeyama. Least-squares estimation of transformation parameters between two point patterns. IEEE Transactions on Pattern Analysis and Machine Intelligence, 13(4):376-380, 1991. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.476, + 0.906, + 0.559 + ], + "angle": 0, + "content": "[85] Vikram Voleti, Chun-Han Yao, Mark Boss, Adam Letts, David Pankratz, Dmitry Tochilkin, Christian Laforte, Robin Rombach, and Varun Jampani. Sv3d: Novel multi-view synthesis and 3d generation from a single image using latent video diffusion. In European Conference on Computer Vision (ECCV), pages 439-457. Springer, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.561, + 0.906, + 0.602 + ], + "angle": 0, + "content": "[86] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. In International Conference on 3D Vision (3DV), 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.604, + 0.906, + 0.685 + ], + "angle": 0, + "content": "[87] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 12619-12629, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.688, + 0.906, + 0.731 + ], + "angle": 0, + "content": "[88] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.732, + 0.906, + 0.8 + ], + "angle": 0, + "content": "[89] Jianyuan Wang, Minghao Chen, Nikita Karaev, Andrea Vedaldi, Christian Rupprecht, and David Novotny. VGGT: Visual geometry grounded network. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.802, + 0.906, + 0.87 + ], + "angle": 0, + "content": "[90] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In Proceedings of the European conference on computer vision (ECCV), pages 52-67, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.873, + 0.906, + 0.901 + ], + "angle": 0, + "content": "[91] Qianqian Wang, Vickie Ye, Hang Gao, Weijia Zeng, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of" + }, + { + "type": "list", + "bbox": [ + 0.524, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.133, + 0.093, + 0.482, + 0.12 + ], + "angle": 0, + "content": "motion: 4d reconstruction from a single video. In arXiv preprint arXiv:2407.13764, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.122, + 0.483, + 0.191 + ], + "angle": 0, + "content": "[92] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. Dust3r: Geometric 3d vision made easy. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20697-20709, 2024. 1, 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.193, + 0.482, + 0.274 + ], + "angle": 0, + "content": "[93] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian A. Scherer. Tartanair: A dataset to push the limits of visual slam. 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 4909-4916, 2020. 
6, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.278, + 0.482, + 0.358 + ], + "angle": 0, + "content": "[94] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems (NeurIPS), 36:7594-7611, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.363, + 0.482, + 0.403 + ], + "angle": 0, + "content": "[95] Yiran Wang, Min Shi, Jiaqi Li, Zihao Huang, Zhiguo Cao, Jianming Zhang, Ke Xian, and Guosheng Lin. Neural video depth stabilizer. In ICCV, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.406, + 0.482, + 0.474 + ], + "angle": 0, + "content": "[96] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. Advances in Neural Information Processing Systems (NeurIPS), 36, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.477, + 0.482, + 0.545 + ], + "angle": 0, + "content": "[97] Daniel Watson, William Chan, Ricardo Martin Brulla, Jonathan Ho, Andrea Tagliasacchi, and Mohammad Norouzi. Novel view synthesis with diffusion models. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.548, + 0.482, + 0.616 + ], + "angle": 0, + "content": "[98] Philippe Weinzaepfel, Vincent Leroy, Thomas Lucas, Romain BRÉGIER, Yohann Cabon, Vaibhav ARORA, Leonid Antsfeld, Boris Chidlovskii, Gabriela Csurka, and Jerome Revaud. CroCo: self-supervised pre-training for 3D vision tasks by cross-view completion. In Proc. NeurIPS, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.619, + 0.482, + 0.673 + ], + "angle": 0, + "content": "[99] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4d gaussian splatting for real-time dynamic scene rendering. In CVPR, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.676, + 0.483, + 0.745 + ], + "angle": 0, + "content": "[100] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 1-10, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.747, + 0.483, + 0.815 + ], + "angle": 0, + "content": "[101] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.818, + 0.483, + 0.9 + ], + "angle": 0, + "content": "[102] Jinbo Xing, Menghan Xia, Yong Zhang, Haoxin Chen, Wangbo Yu, Hanyuan Liu, Gongye Liu, Xintao Wang, Ying Shan, and Tien-Tsin Wong. Dynamiccafter: Animating open-domain images with video diffusion priors. In European Conference on Computer Vision (ECCV), pages 399-417. Springer, 2024. 1, 3, 4, 6" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.093, + 0.483, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.147 + ], + "angle": 0, + "content": "[103] Tian-Xing Xu, Xiangjun Gao, Wenbo Hu, Xiaoyu Li, Song-Hai Zhang, and Ying Shan. Geometrycrafter: Consistent geometry estimation for open-world videos with diffusion priors, 2025. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.149, + 0.907, + 0.231 + ], + "angle": 0, + "content": "[104] Jianing Yang, Alexander Sax, Kevin J. Liang, Mikael Henaff, Hao Tang, Ang Cao, Joyce Chai, Franziska Meier, and Matt Feiszli. Fast3r: Towards 3d reconstruction of \\(1000+\\) images in one forward pass. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.233, + 0.905, + 0.285 + ], + "angle": 0, + "content": "[105] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Ji-ashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proc. CVPR, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.288, + 0.905, + 0.342 + ], + "angle": 0, + "content": "[106] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. Advances in Neural Information Processing Systems (NeurIPS), 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.344, + 0.905, + 0.424 + ], + "angle": 0, + "content": "[107] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20331-20341, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.427, + 0.905, + 0.468 + ], + "angle": 0, + "content": "[108] David Yifan Yao, Albert J. Zhai, and Shenlong Wang. Uni4d: Unifying visual foundation models for 4d modeling from a single video, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.47, + 0.905, + 0.537 + ], + "angle": 0, + "content": "[109] Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. 
Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048, 2024.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.539, + 0.905, + 0.607 + ], + "angle": 0, + "content": "[110] Xumin Yu, Yongming Rao, Ziyi Wang, Zuyan Liu, Jiwen Lu, and Jie Zhou. Pointr: Diverse point cloud completion with geometry-aware transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12498-12507, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.609, + 0.905, + 0.649 + ], + "angle": 0, + "content": "[111] Yuheng Yuan, Qiuhong Shen, Xingyi Yang, and Xinchao Wang. 1000+ fps 4d gaussian splatting for dynamic scene rendering, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.651, + 0.905, + 0.719 + ], + "angle": 0, + "content": "[112] David Junhao Zhang, Jay Zhangjie Wu, Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou. Show-1: Marrying pixel and latent diffusion models for text-to-video generation. International Journal of Computer Vision (IJCV), pages 1-15, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.721, + 0.905, + 0.801 + ], + "angle": 0, + "content": "[113] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. In International Conference on Learning Representations (ICLR), 2025. 1, 2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.804, + 0.905, + 0.858 + ], + "angle": 0, + "content": "[114] Jason Y Zhang, Amy Lin, Moneish Kumar, Tzu-Hsuan Yang, Deva Ramanan, and Shubham Tulsiani. Cameras as rays: Pose estimation via ray diffusion. In International Conference on Learning Representations (ICLR), 2024. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.86, + 0.905, + 0.9 + ], + "angle": 0, + "content": "[115] Qihang Zhang, Shuangfei Zhai, Miguel Angel Bautista, Kevin Miao, Alexander Toshev, Joshua Susskind, and Jiatao Gu. World-consistent video diffusion with explicit 3d" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.133, + 0.093, + 0.482, + 0.12 + ], + "angle": 0, + "content": "modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.483, + 0.177 + ], + "angle": 0, + "content": "[116] Zhoutong Zhang, Forrester Cole, Zhengqi Li, Michael Rubinstein, Noah Snavely, and William T. Freeman. Structure and motion from casual videos. In European Conference on Computer Vision (ECCV), 2022. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.179, + 0.482, + 0.247 + ], + "angle": 0, + "content": "[117] Wenliang Zhao, Yongming Rao, Zuyan Liu, Benlin Liu, Jie Zhou, and Jiwen Lu. Unleashing text-to-image diffusion models for visual perception. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5729-5739, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.249, + 0.482, + 0.305 + ], + "angle": 0, + "content": "[118] Chuanxia Zheng and Andrea Vedaldi. Free3d: Consistent novel view synthesis without 3d representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9720-9731, 2024. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.306, + 0.482, + 0.375 + ], + "angle": 0, + "content": "[119] Yang Zheng, Adam W Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J Guibas. 
Pointodyssey: A large-scale synthetic dataset for long-term point tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 19855-19865, 2023. 6, 15" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.483, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.107, + 0.086, + 0.892, + 0.14 + ], + "angle": 0, + "content": "Geo4D: Leveraging Video Generators for Geometric 4D Scene Reconstruction Supplementary Material" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.157, + 0.484, + 0.219 + ], + "angle": 0, + "content": "In this supplementary material, we provide additional information to supplement our main submission. The code is available here for research purposes: github.com/ jzr99/Geo4D" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.252, + 0.313, + 0.269 + ], + "angle": 0, + "content": "6. Implementation Details" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.284, + 0.258, + 0.3 + ], + "angle": 0, + "content": "6.1. Training Dataset" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.313, + 0.484, + 0.478 + ], + "angle": 0, + "content": "As shown in Tab. 5, we use five synthetic datasets for training: Spring [50], BEDLAM [2], PointOdyssey [119], TarTanAir [93], and VirtualKitti [6]. Although all datasets are synthetic, we found that some depth pixels are missing in PointOdyssey [119]. To address this, we apply max pooling to inpaint the missing pixels. During training, we sample each dataset according to the ratios in Tab. 5. For each sample, we select 16 frames from the sequence, with the sampling stride randomly chosen from \\(\\{1,2,3\\}\\) to allow our diffusion model to adapt to input videos with various frame rates." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.508, + 0.287, + 0.525 + ], + "angle": 0, + "content": "6.2. 
Optimization Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.537, + 0.484, + 0.901 + ], + "angle": 0, + "content": "The overall optimization process is outlined in Algorithm 1. We first predict all three modality maps using our diffusion model for each video clip \\( g \\). The predicted point maps are then roughly aligned based on the overlapping frames using the Umayama algorithm [84]. The camera intrinsic \\( \\mathbf{K}^k \\) is initialized by minimizing the projection error of the point map \\( X^{k,g^k} \\) in its reference (first) frame \\( k \\) within each window group \\( g^k \\). The camera extrinsics are then initialized using the RANSAC PnP algorithm. In the first stage of optimization, the point maps are roughly disentangled into camera pose and depth map. The disparity map is then aligned with the global depth inferred from point maps by solving Eq. (5) from the main paper to obtain the scale and shift parameters. The camera parameters extracted from the predicted ray map are aligned with the global camera trajectory based on the reference (first) frame of each video clip \\( g \\) via Eq. (8) from the main paper. After initializing all the alignment learnable parameters, including rotation \\( \\mathbf{R}_{*}^{g} \\), scale \\( \\lambda_{*}^{g} \\), and shift \\( \\beta_{*}^{g} \\) across different modalities, where \\( * \\in \\{\\mathrm{p},\\mathrm{d},\\mathrm{c}\\} \\), we jointly optimize all the learnable parameters by Eq. (10). Specifically, we set the weights for each loss term in Eq. (10) as \\( \\alpha_{1} = 1, \\alpha_{2} = 2, \\alpha_{3} = 0.005, \\alpha_{4} = 0.015 \\) to roughly equalize the scale of the different losses." 
+ }, + { + "type": "algorithm", + "bbox": [ + 0.515, + 0.156, + 0.905, + 0.407 + ], + "angle": 0, + "content": "Algorithm 1 Multi-Modal Alignment Optimization \n1: \\( X^{i,g}, D^{i,g}, r^{i,g} \\gets \\) Predicted by our diffusion model \n2: \\( D_{\\mathrm{p}}^{i}, \\lambda_{\\mathrm{p}}^{g}, R_{\\mathrm{p}}^{g}, \\beta_{\\mathrm{p}}^{g} \\gets \\) Initialized by Umayama algorithm \n3: \\( K_{\\mathrm{p}}^{k} \\gets \\) Optimized from \\( X^{k,g^k} \\) \n4: \\( R_{\\mathrm{p}}^{i}, o_{\\mathrm{p}}^{i} \\gets \\) Initialized by Ransac PnP from pointmaps \\( X^i \\) \n5: \\( R_{\\mathrm{c}}^{i,g}, o_{\\mathrm{c}}^{i,g} \\gets \\) Initialized by Eqs. (6) and (7) from raymaps \\( r^{i,g} \\) \n6: repeat \n7: if Iteration = Align start iteration then \n8: \\( \\lambda_{\\mathrm{d}}^{g}, \\beta_{\\mathrm{d}}^{g} \\gets \\arg \\min \\mathcal{L}_{\\mathrm{d}} \\) (Eq. (5)) \n9: \\( R_{\\mathrm{c}}^{g}, \\lambda_{\\mathrm{c}}^{g}, \\beta_{\\mathrm{c}}^{g} \\gets \\arg \\min \\mathcal{L}_{\\mathrm{c}} \\) (Eq. (8)) \n10: else if Iteration < Align start iteration then \n11: \\( D_{\\mathrm{p}}^{i}, K_{\\mathrm{p}}^{i}, R_{\\mathrm{p}}^{i}, o_{\\mathrm{p}}^{i}, \\lambda_{\\mathrm{p}}^{g}, R_{\\mathrm{p}}^{g}, \\beta_{\\mathrm{p}}^{g}, \\gets \\arg \\min \\mathcal{L}_{\\mathrm{p}} + \\mathcal{L}_{\\mathrm{s}} \\) \n12: else \n13: \\( D_{\\mathrm{p}}^{i}, K_{\\mathrm{p}}^{i}, R_{\\mathrm{p}}^{i}, o_{\\mathrm{p}}^{i}, \\lambda_{*}^{g}, R_{*}^{g}, \\beta_{*}^{g} \\gets \\arg \\min \\mathcal{L}_{\\mathrm{all}} \\) \n14: end if \n15: until max loop reached" + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.431, + 0.905, + 0.515 + ], + "angle": 0, + "content": "
DatasetScene type#Frames#SequencesRatio
PointOdyssey [119]Indoors/Outdoors200K13116.7%
TartanAir [93]Indoors/Outdoors1000K16316.7%
Spring [50]Outdoors6K3716.7%
VirtualKITTI [6]Driving43K32016.7%
BEDLAM [2]Indoors/Outdoors380K10K33.3%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.517, + 0.905, + 0.544 + ], + "angle": 0, + "content": "Table 5. Details of training datasets. Our method only uses synthetic datasets for training." + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.566, + 0.905, + 0.663 + ], + "angle": 0, + "content": "
StepsVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
10.22170.70.2340.0720.753
50.20573.50.1850.0630.547
100.20773.20.2120.0710.508
250.22072.20.2110.0740.564
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.664, + 0.905, + 0.692 + ], + "angle": 0, + "content": "Table 6. Ablation study for the DDIM sampling steps. on the Sintel [5] dataset." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.725, + 0.704, + 0.742 + ], + "angle": 0, + "content": "7. Additional Analysis" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.754, + 0.859, + 0.771 + ], + "angle": 0, + "content": "7.1. Ablating the Number of Denoising Steps" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.907, + 0.903 + ], + "angle": 0, + "content": "We study the influence of the number of denoising steps during inference. As shown in Tab. 6, the model achieves optimal performance after around 5 steps. Compared to the video generation task, where a larger number of denoising steps usually produces a more detailed generated video, 4D reconstruction is a more deterministic task, which requires fewer steps. Similar phenomena are also observed in [22], which uses a video generator for video depth estimation." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.09, + 0.086, + 0.912, + 0.692 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.708, + 0.908, + 0.737 + ], + "angle": 0, + "content": "Figure 5. Additional qualitative results. Our method generalizes well to various scenes with different 4D objects and performs robustly against different camera and object motions." + }, + { + "type": "table", + "bbox": [ + 0.092, + 0.76, + 0.481, + 0.824 + ], + "angle": 0, + "content": "
MethodVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
w/o fine-tuned0.21272.10.1920.0610.577
w fine-tuned0.20573.50.1850.0630.547
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.825, + 0.483, + 0.867 + ], + "angle": 0, + "content": "Table 7. Ablation study for the fine-tuned point map VAE on the Sintel [5] dataset. The fine-tuned point map VAE performs better than the original one." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.762, + 0.907, + 0.78 + ], + "angle": 0, + "content": "7.2. Ablation Study for Fine-Tuned Point Map VAE" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.907, + 0.903 + ], + "angle": 0, + "content": "As stated in the main paper, we added an additional branch to predict the uncertainty for our point map VAE and fine-tuned it based on Eq. 3. We perform an ablation study on our fine-tuning strategy. As shown in Tab. 7, our fine-tuned point map VAE achieves consistently better performance on both video depth estimation and camera pose estimation tasks compared with the original pre-trained image VAE," + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.089, + 0.483, + 0.207 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.095, + 0.217, + 0.479, + 0.23 + ], + "angle": 0, + "content": "Figure 6. Visualization of different geometric modality maps." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.247, + 0.483, + 0.276 + ], + "angle": 0, + "content": "demonstrating the necessity and effectiveness of our finetuning strategy." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.287, + 0.436, + 0.302 + ], + "angle": 0, + "content": "7.3. Analysis of Multi-Modal Representation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.308, + 0.484, + 0.581 + ], + "angle": 0, + "content": "Point maps (PMs) and disparity maps (DMs) are complementary. DMs better represent near objects, while PMs are more depth-agnostic (e.g., human vs house in Fig. 6 (b,c)). 
As in prior work, DMs are affine invariant (which here makes them range-compatible with the pretrained RGB VAE); their scale and shift, needed to recover undistorted geometry, are inferred by matching them to the predicted PMs. Ray maps (RMs) help infer the camera pose when PMs fail to represent points at infinity (such as the sky in Fig. 6 (e)). We observed that PMs tend to be noisier than DMs, so we prioritized modeling the PMs' uncertainty. Per-pixel uncertainty for ray maps are less meaningful given the high degree of correlation between individual rays. During multi-modal alignment, we align global point clouds with DMs in disparity space and with PMs in linear space. This naturally gives more weight to near points, which tend to be estimated well by DMs, and weighs points based on uncertainty with PMs, thus taking advantage of both modalities." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.593, + 0.226, + 0.608 + ], + "angle": 0, + "content": "8. Visualization" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.619, + 0.484, + 0.679 + ], + "angle": 0, + "content": "Figure 5 shows additional visualizations for indoor, outdoor, and driving scenes. Although our model is only trained on synthetic datasets, it generalizes to real-world data with diverse objects and motions." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.693, + 0.214, + 0.708 + ], + "angle": 0, + "content": "9. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.718, + 0.483, + 0.899 + ], + "angle": 0, + "content": "Although our method performs well and generalizes to a wide range of in-the-wild videos, it can struggle in cases involving significant changes in focal length or extreme camera motion throughout a sequence. This limitation likely stems from the lack of focal length variation in our training data. Incorporating more sequences with diverse camera movements and zooming effects could help mitigate this issue. 
Moreover, due to the inherent temporal attention mechanism in our network architecture, our approach currently supports only monocular video input. Extending the method to handle multi-view images or videos is a promising direction for future work." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "17" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_origin.pdf b/data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7b9022b88ff71343467398b65015165674dc10bb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:666a6c1dd55700069d01cd6f6984c091c08c149d908a6b7b074d0c53f130357b +size 19362648 diff --git a/data/2025/2504_07xxx/2504.07961/full.md b/data/2025/2504_07xxx/2504.07961/full.md new file mode 100644 index 0000000000000000000000000000000000000000..f67d7538d7839db23a6018c353f3157f7d10f016 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/full.md @@ -0,0 +1,464 @@ +# Geo4D: Leveraging Video Generators for Geometric 4D Scene Reconstruction + +Zeren Jiang1 Chuanxia Zheng1 Iro Laina1 Diane Larlus2 Andrea Vedaldi1 +1Visual Geometry Group, University of Oxford 2Naver Labs Europe + +{zeren, cxzheng, iro, vedaldi}@robots.ox.ac.uk diane.larlus@naverlabs.com + +geo4d.github.io + +![](images/0e871412a73fb1a4e0d250027f7de496bdefed521e06be3fdba1bc8a057c63dd.jpg) +Figure 1. Geo4D repurposes a video diffusion model [102] for monocular 4D reconstruction. It uses only synthetic data for training, yet generalizes well to out-of-domain real videos. 
It predicts several geometric modalities, including point maps, disparity maps, and ray maps, fusing and aligning them to obtain state-of-the-art dynamic reconstruction even for scenes with extreme object and camera motion. + +# Abstract + +We introduce Geo4D, a method to repurpose video diffusion models for monocular 3D reconstruction of dynamic scenes. By leveraging the strong dynamic priors captured by large-scale pre-trained video models, Geo4D can be trained using only synthetic data while generalizing well to real data in a zero-shot manner. Geo4D predicts several complementary geometric modalities, namely point, disparity, and ray maps. We propose a new multi-modal alignment algorithm to align and fuse these modalities, as well as a sliding window approach at inference time, thus enabling robust and accurate 4D reconstruction of long videos. Extensive experiments across multiple benchmarks show that Geo4D significantly surpasses state-of-the-art video depth estimation methods. + +# 1. Introduction + +We consider the problem of feed-forward 4D reconstruction, which involves learning a neural network to reconstruct the 3D geometry of a dynamic scene from a monoc + +ular video. This task is particularly challenging for videos captured in uncontrolled settings, such as those shot with handheld cameras or downloaded from the Internet. However, a robust solution to this problem would have a tremendous impact on a wide range of applications, from video understanding to computer graphics and robotics. + +4D reconstruction from videos is related to multi-view static 3D reconstruction, which is typically addressed using methods from visual geometry like bundle adjustment. Recent neural networks [89, 92] have emerged as powerful tools that can replace, or at least complement, bundle adjustment. They excel especially in difficult reconstruction scenarios, involving, e.g., textureless surfaces and occlusions, thanks to the priors they learn from data. 
Given the additional challenges involved in 4D reconstruction, we expect that such priors would benefit this task even more. + +In fact, powerful networks like DUSt3R [92], designed for static multi-view 3D reconstruction, have recently been extended to the dynamic case, for example by MonST3R [113]. However, these models are heavily engineered to solve specific 3D reconstruction problems. Most importantly, they require significant amounts of training data with 3D annotations for supervision. Such data + +is difficult to collect for dynamic scenes, especially in real life. This suggests using 4D synthetic training data instead. However, this data is difficult to obtain at scale, and the gap with the real world can compromise generalization. + +One way to mitigate this problem is to pre-train the model on tasks related to 3D reconstruction for which real data is easily available. For example, DUSt3R [92] and derived methods [113] use image matching for pretraining [98]. Here, we suggest starting instead from an off-the-shelf video generator. Video generators are powerful models, often considered proxies of world simulators [37, 54, 59]. More importantly for us, the videos they generate demonstrate an understanding of effects like camera motion and perspective, as well as typical object motion in the context of a scene. However, they only generate pixels, leaving any 3D or 4D understanding implicit and thus not directly actionable. + +In this work, we show that a pre-trained off-the-shelf video generator can be turned into an effective monocular feed-forward 4D reconstructor. To this end, we introduce Geo4D, a novel approach for adapting Video Generators for Geometric 4D Reconstruction. With Geo4D, we demonstrate that these generic video architectures can successfully solve complex 4D reconstruction tasks, which is a step towards future video foundation models that natively integrate 4D geometry. 
Prior work such as Marigold [28] and concurrent work DepthCrafter [22] have looked at adapting, respectively, image and video generators for depth estimation. Here, we go one step further and consider the full recovery of 4D geometry, including camera motion and dynamic 3D structure. + +With Geo4D, our goal is to make 4D geometry explicit in the video generator. This in turn requires us to choose an explicit representation of 4D information. We follow DUSt3R and adopt its viewpoint-invariant point maps. Namely, we associate each pixel in each frame with the coordinate of the corresponding 3D point, expressed relative to the first frame in the video, used as a reference. Hence, the static parts of the point clouds extracted from the different frames line up, and the dynamic parts form a 3D 'trace' of the motion of the dynamic objects, as shown in Fig. 1. + +Viewpoint-invariant point maps are a powerful representation because they implicitly encode the camera motion and intrinsics and can be easily predicted by a neural network [92]. However, they are not necessarily the best representation for all parts of the scene, particularly for points far away from the observer or even at infinity, such as the sky. We thus consider two more modalities with better dynamic range, namely disparity maps and camera ray maps. Ray maps, in particular, are defined for all image pixels regardless of the scene geometry. + +Our model thus predicts three modalities: point, disparity, and ray maps. These modalities are redundant in prin + +ciple, but complementary in practice. At test time, we reconcile them via a fast, global optimization step and show that this leads to significantly more robust 4D reconstructions. Due to depth and ray map prediction, we show very strong empirical results on video depth estimation and in the recovery of the camera orientation. 
+ 

One of the challenges of monocular 4D reconstruction is that it is ambiguous, significantly more so than static 3D reconstruction. However, the stochastic nature of the video generator can help deal with this ambiguity. We also introduce uncertainty maps in the encoder-decoder architecture that processes the geometric maps, and integrate them into the multi-modal alignment process.

Overall, our contributions are as follows. (i) We introduce Geo4D, a 4D feed-forward network for dynamic scene reconstruction that builds on top of an off-the-shelf video generator. (ii) We suggest generating multiple partially redundant geometric modalities and fusing them at test time via lightweight optimization. (iii) We show the benefits of this multi-modal fusion in terms of improved 4D prediction accuracy. Experiments show that this model can reconstruct even highly dynamic scenes (such as the drifting scene in DAVIS [23] presented in Fig. 1) and outperforms current video depth and camera rotation estimation methods.

# 2. Related Work

# 2.1. Dynamic Scene Reconstruction

Static 3D reconstruction. Feed-forward 3D reconstruction has achieved remarkable success across various representations, including voxels [11, 74, 83], meshes [18, 72, 90], and point clouds [41, 110]. These advancements have been further driven by implicit neural representations [52, 56, 60, 75] and the emergence of 3D Gaussian Splatting (3D-GS) [7, 9, 29, 76, 79, 80]. Recently, DUSt3R [92] introduced a point map representation for scene-level 3D reconstruction, followed by [35, 86, 89, 104]. However, these models predominantly focus on static 3D reconstruction. Our approach also uses point maps as a representation but extends them to handle dynamic scenes, which present additional challenges due to object motion over time.

Iterative 4D reconstruction. Iterative or optimization-based approaches reconstruct 4D models from monocular videos by iteratively fitting the observed data. 
Classical techniques often rely on RGB-D sensors [24, 53], but such steps are impractical for many real-world scenes. Recently, with advancements in neural representations [52, 56], NeRF-based approaches [27, 38, 39, 57, 58, 62] have shown impressive results. However, volume rendering in NeRF is computationally expensive. Convergence and rendering speed can be improved by using 3D-GS representations [12, 29, 34, 43, 91, 99, 107, 111], which reduce but do not eliminate the cost of iterative optimization. Very + +![](images/d4dfa354b18df642a9482367bea792839ff1939d45e9f0c86fd9d7e7655772fc.jpg) +Figure 2. Overview of Geo4D. During training, video conditions are injected by locally concatenating the latent feature of the video with diffused geometric features $\mathbf{z}_t^{\mathrm{X}},\mathbf{z}_t^{\mathrm{D}},\mathbf{z}_t^{\mathrm{r}}$ and are injected globally via cross-attention in the denoising U-Net, after CLIP encoding and a query transformer. The U-Net is fine-tuned via Eq. 2. During inference, iteratively denoised latent features $\hat{\mathbf{z}}_0^{\mathrm{X}},\hat{\mathbf{z}}_0^{\mathrm{D}},\hat{\mathbf{z}}_0^{\mathrm{r}}$ are decoded by the fine-tuned VAE decoder, followed by multi-modal alignment optimization for coherent 4D reconstruction. + +recently, MegaSaM [40] achieved highly accurate and robust camera pose estimation and reconstruction for dynamic videos, but it requires accurate monocular depth priors. Similarly, Uni4D [108] produces accurate 4D reconstructions by leveraging various visual foundation models and performing multi-stage bundle adjustment. In contrast, our approach is a diffusion-driven feed-forward framework, which eliminates the need for per-video bundle adjustment and external depth estimation models. + +Feed-forward 4D reconstruction. 
Similar to our approach, recent works have started to explore feed-forward 4D reconstruction for dynamic scenes: a monocular video with dynamic objects is processed by a neural network to recover a 4D representation. For objects, L4GM [66] and Animate3D [26] first generate multi-view videos from a monocular video input, and subsequently apply 3D-GS [29] to reconstruct a temporally consistent 4D model. For scenes, a notable example is MonST3R [113], which adapts the static scene reconstruction of DUSt3R [92] to handle dynamic scenes. Very recently, Easi3R [8] applies attention adaptation during inference and performs 4D reconstruction based on DUSt3R [92] in an efficient, training-free manner.

# 2.2. Geometric Diffusion Models

Our method builds upon advancements in video diffusion models [3, 4, 16, 19, 21, 31, 73, 88, 94, 102, 112], which generate temporally consistent videos from text or image prompts. Recent studies have explored the rich 3D priors embedded within large-scale pre-trained diffusion models, employing either knowledge distillation [25, 42, 51, 61, 87, 96] or fine-tuning [20, 36, 45-47, 71, 85, 118] for 3D reconstruction and generation. While these methods have significantly advanced single-object 3D reconstruction from sparse inputs, they remain largely constrained to static, isolated objects centered within an image. Beyond single

object reconstruction, several recent efforts have extended pre-trained diffusion models to tackle scene-level 3D tasks, such as optical flow estimation [69], view synthesis [10, 15, 44, 68, 81, 109], depth estimation [13, 28, 117], and normal estimation [14, 33, 63]. More related to our approach, Matrix3D [49] jointly predicts depth and camera parameters, and WVD [115] introduces a hybrid RGB+point map representation for scene reconstruction. However, these approaches assume static 3D environments, whereas we address dynamic 4D scene reconstruction, which is a much harder problem due to object motion across time. 
+ +More closely related to our approach, concurrent GeometryCrafter [103] introduced a point map VAE with a dual encoder-decoder architecture to improve reconstruction accuracy. However, their point maps are defined in individual camera coordinates, necessitating the use of additional segmentation [30] and tracking models [101] to recover the global point map and estimate camera poses. Aether [82], on the other hand, outputs depth maps and ray maps from a video diffusion model for 4D reconstruction. In contrast, our experiments demonstrate that performance can be significantly enhanced by jointly predicting multiple geometric modalities that capture diverse dynamic ranges, ensuring better temporal coherence and robustness. Importantly, our approach is self-contained and does not rely on external models, enhancing its generality and reliability. + +# 3. Method + +Our goal is to learn a neural network $f_{\theta}$ that can reconstruct dynamic 3D scenes from monocular videos. Given as input a monocular video $\mathcal{I} = \{I^i\}_{i=1}^N$ consisting of $N$ frames, where each frame is an RGB image $I^i \in \mathbb{R}^{H \times W \times 3}$ , the network $f_{\theta}$ returns a representation of its 4D geometry: + +$$ +f _ {\boldsymbol {\theta}}: \left\{\boldsymbol {I} ^ {i} \right\} _ {i = 1} ^ {N} \mapsto \left\{\left(\boldsymbol {D} ^ {i}, \boldsymbol {X} ^ {i}, \boldsymbol {r} ^ {i}\right) \right\} _ {i = 1} ^ {N}. \tag {1} +$$ + +The network computes the disparity map $D^{i}\in \mathbb{R}^{H\times W\times 1}$ the viewpoint-invariant point map $X^{i}\in \mathbb{R}^{H\times W\times 3}$ , and the ray map $\pmb {r}^i\in \mathbb{R}^{H\times W\times 6}$ for each frame $I^i$ $i = 1,\dots ,N$ . As we discuss in Sec. 3.2, these quantities collectively represent the 4D geometry of a scene, including its dynamic structure and time-varying camera extrinsic and intrinsic parameters. No camera parameters are provided as input; these are implicitly estimated by the model as well. 
We implement $f_{\theta}$ as a video diffusion model, where $\theta$ are the learnable parameters. We discuss the relevant background on video diffusion models in Sec. 3.1. Then, in Sec. 3.2, we describe how we extend the model to predict the three modalities of the 4D geometry. Finally, in Sec. 3.3, we describe how we fuse and align these modalities to obtain a coherent 4D reconstruction at test time.

# 3.1. Preliminaries: Video Diffusion Model

Our key insight is that by building on pre-trained video diffusion models, our approach can exploit the strong motion and scene geometry priors inherently encoded within these models. Specifically, we build Geo4D on top of DynamiCrafter [102], a "foundation" video diffusion model. DynamiCrafter is a latent diffusion model [67]: it uses a variational autoencoder (VAE) to obtain a more compact video representation and thus reduce computational complexity. During training, a target sequence $\mathcal{X} = \pmb{x}^{1:N}$ is first encoded into the latent space using the encoder $z_0^{1:N} = \mathcal{E}(\pmb{x}^{1:N})$ , and then perturbed by $\pmb{z}_t^{1:N} = \sqrt{\bar{\alpha}_t}\pmb{z}_0^{1:N} + \sqrt{1 - \bar{\alpha}_t}\epsilon^{1:N}$ , where $\epsilon \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ is Gaussian noise, and $\bar{\alpha}_t$ is the noise level at step $t$ of $T$ noising steps. The denoising network $\epsilon_{\theta}$ is then trained to reverse this noising process by optimizing the following objective:

$$
\min _ {\boldsymbol {\theta}} \mathbb {E} _ {(\boldsymbol {x} ^ {1: N}, y), t, \epsilon^ {1: N} \sim \mathcal {N} (\boldsymbol {0}, \boldsymbol {I})} \left\| \epsilon^ {1: N} - \epsilon_ {\boldsymbol {\theta}} \left(\boldsymbol {z} _ {t} ^ {1: N}, t, y\right) \right\| _ {2} ^ {2}, \tag {2}
$$

where $y$ is the conditional input. 
Once trained, the model generates a video prompted by $y$ via iteratively denoising from pure noise $\mathbf{z}_T^{1:N}$ , and then decoding the denoised latent with a decoder $\hat{\mathcal{X}} = \mathcal{D}(\hat{\mathbf{z}}_0^{1:N})$ . + +# 3.2. Multi-modal Geometric 4D Diffusion + +We first provide a more precise description of the 4D multimodal representation output by our model, and then explain how it is encoded in the latent space for generation. + +Multi-modal geometric representations. The dynamic 3D structure of a scene is represented by a sequence of point maps $\{\pmb{X}^i\}_{i=1}^N$ , one for each of its $N$ frames. Let $(u, v)$ denote the pixel coordinates in the image plane. Then, the value $X_{uv}^i \in \mathbb{R}^3$ is the 3D coordinate of the scene point that lands at pixel $(u, v)$ in frame $I^i$ , expressed in the reference frame of camera $i = 1$ . Because the reference frame is fixed and independent of the time-varying viewpoint, we call these point maps viewpoint-invariant. The + +advantages of this representation are convincingly demonstrated by DUSt3R [92]. For a static scene, or by knowing which image pixels correspond to the static part of a scene, knowledge of the point maps allows recovery of the intrinsic and extrinsic camera parameters as well as the scene depth. This is done by solving an optimization problem that aligns the dynamic point maps with a pinhole camera model. + +As noted in Sec. 1, while point maps $\{\pmb{X}^i\}_{i=1}^N$ fully encode the 4D geometry of the scene, they are not effective for all parts of the scene. Their dynamic range is limited, and they are not even defined for points at infinity (e.g. sky). Hence, we consider two additional modalities: disparity maps $\{\pmb{D}^i\}_{i=1}^N$ and camera ray maps $\{\pmb{r}^i\}_{i=1}^N$ , also encouraged by prior evidence [14, 33, 49] that diffusion models can benefit from learning to predict multiple quantities. 
Disparity maps are not viewpoint-invariant, but have a better dynamic range than point maps (the disparity is zero for points at infinity). Ray maps represent only the camera parameters and are defined for all image pixels, independent of the scene geometry. For the disparity map, $D_{uv}^i$ is the disparity (inverse depth) of the scene point that lands at pixel $(u,v)$ , as seen in frame $I^i$ . For the ray map, we adopt Plücker coordinates [75, 97, 118], i.e., $\pmb{r}_{uv} = (\pmb{d}_{uv}, \pmb{m}_{uv})$ , where $\pmb{d}_{uv} = \mathbf{R}^\top \mathbf{K}^{-1}(u,v,1)^\top$ is the ray direction, and $\pmb{m}_{uv} = -\mathbf{R}^\top \mathbf{t} \times \pmb{d}_{uv}$ , where $(\mathbf{R}, \mathbf{K}, \mathbf{t})$ are the camera's rotation, calibration, and translation parameters. + +Multi-modal latent encoding. The three modalities come in the form of images and can thus be naturally predicted by the video diffusion architecture. However, this requires first mapping them to the latent space, for which we need suitable versions of the encoder $\mathcal{E}$ and decoder $\mathcal{D}$ from Sec. 3.1. Related prior work [14, 28] for depth prediction simply repurposes a pre-trained image encoder-decoder without modification. We found this to work well for disparity and ray maps, but not for point maps. Hence, for the point maps only, we fine-tune the pre-trained decoder $\mathcal{D}$ using the following objective function [100]: + +$$ +\mathcal {L} = - \sum_ {u v} \ln \frac {1}{\sqrt {2} \sigma_ {u v}} \exp - \frac {\sqrt {2} \ell_ {1} (\mathcal {D} (\mathcal {E} (\boldsymbol {X})) _ {u v} , \boldsymbol {X} _ {u v})}{\sigma_ {u v}}, \tag {3} +$$ + +where $\sigma \in \mathbb{R}^{H\times W}$ is the uncertainty of the reconstructed point map, which is also predicted by an additional branch of our VAE decoder. 
We leave the encoder $\mathcal{E}$ unchanged to modify the latent space as little as possible; instead, we normalize the point maps to the range $[-1,1]$ to make them more compatible with the pre-trained image encoder. + +Video conditioning. The original video diffusion model is conditioned on a single image, but here we need to condition it on the entire input video $\mathcal{I} = \{I^i\}_{i=1}^N$ . To this end, we use a hybrid conditioning mechanism with two streams. + +As shown in Fig. 2, in one stream, we extract a global representation of each frame $\pmb{I}^i$ by passing it to + +CLIP [64] followed by a lightweight learnable query transformer [1]. These vectors are incorporated in the transformer via cross-attention layers injected in each U-Net block. In the other stream, we extract local spatial features from the VAE encoder and concatenate them channel-wise to the noised latents, encoding the generated 4D modalities $\{(D^i,X^i,r^i)\}_{i = 1}^N$ . + +# 3.3. Multi-Modal Alignment + +As noted, Geo4D predicts several non-independent geometric modalities. Furthermore, processing all frames of a long monocular video simultaneously with a video diffusion model is computationally prohibitive. Therefore, during inference, we use a temporal sliding window that segments the video into multiple overlapping clips, with partial overlap to facilitate joining them. The goal of this section is to fuse the resulting multi-modal and multi-window data into a single, coherent reconstruction of the entire video. + +Temporal sliding window. Given a video $\mathcal{I} = \{\pmb{I}^i\}_{i=1}^N$ with $N$ frames, we divide it into several video clips $\mathcal{G} = \{g^k\}$ , $k \in S$ , where each clip $g^k$ contains $V$ frames $\{I^i\}_{i=k}^{k+V-1}$ , and the set of starting indices is $\mathcal{S} = \{0, s, 2s, \ldots, \left\lfloor \frac{N-V}{s} \right\rfloor s\} \cup \{N-V\}$ . Here, $s$ is the sliding window stride. 
The final term $\{N-V\}$ ensures that the last clip always includes the final frames of the video. + +Alignment objectives. First, given the predicted point maps $X^{i,g}$ for each frame $i$ in each video clip $g \in \mathcal{G}$ , we derive corresponding globally aligned point maps in world coordinates, as well as the relative camera motion and scale parameters. We denote these quantities with the p subscript to emphasize that they are inferred from the point map predictions. To do so, we extend the pairwise global alignment loss from DUSt3R to a group-wise one: + +$$ +\mathcal {L} _ {\mathrm {p}} \left(\boldsymbol {X}, \lambda_ {\mathrm {p}} ^ {g}, \boldsymbol {P} _ {\mathrm {p}} ^ {g}\right) = \sum_ {g \in \mathcal {G}} \sum_ {i \in g} \sum_ {u v} \left\| \frac {\boldsymbol {X} _ {u v} ^ {i} - \lambda_ {\mathrm {p}} ^ {g} \boldsymbol {P} _ {\mathrm {p}} ^ {g} \boldsymbol {X} _ {u v} ^ {i , g}}{\boldsymbol {\sigma} _ {u v} ^ {i , g}} \right\| _ {1}, \tag {4} +$$ + +where $\lambda_{\mathrm{p}}^{g}$ and $P_{\mathrm{p}}^{g} = [\mathbf{R}_{\mathrm{p}}^{g}|\beta_{\mathrm{p}}^{g}]$ denote the group-wise scale and transformation matrix that align the group-relative point maps $X^{i,g}$ to the point maps $X^i$ expressed in the global reference frame. $\sigma_{uv}^{i,g}$ denotes the uncertainty of the point map for frame $i$ in group $g$ at pixel $(u,v)$ . We further parameterize each of these point maps as $X_{uv}^{i} = \mathbf{R}_{\mathrm{p}}^{i^{\top}}\mathbf{K}_{\mathrm{p}}^{i^{-1}}D_{\mathrm{p},uv}^{i^{-1}}(u,v,1) + o_{\mathrm{p}}^{i}$ in terms of each camera's calibration $\mathbf{K}_{\mathrm{p}}^{i}$ , world-to-camera rotation $\mathbf{R}_{\mathrm{p}}^{i}$ , and center $o_{\mathrm{p}}^{i}$ expressed in the global reference frame, and the disparity map $D_{\mathrm{p}}^{i}$ . 
Substituting this expression into the loss function (4) and minimizing it, we can thus recover $\mathbf{K}_{\mathrm{p}}^{i},\mathbf{R}_{\mathrm{p}}^{i},o_{\mathrm{p}}^{i}, D_{\mathrm{p}}^{i},\lambda_{\mathrm{p}}^{g},P_{\mathrm{p}}^{g}$ from the predicted point maps. + +The steps above infer the disparity maps $D_{\mathrm{p}}^{i}$ from the point maps, but the model also predicts disparity maps $D_{\mathrm{d}}^{i}$ directly, where the d subscript denotes disparity prediction. + +We introduce the following loss to align them: + +$$ +\mathcal {L} _ {\mathrm {d}} \left(\boldsymbol {D} _ {\mathrm {p}}, \lambda_ {\mathrm {d}} ^ {g}, \beta_ {\mathrm {d}} ^ {g}\right) = \sum_ {g \in \mathcal {G}} \sum_ {i \in g} \left\| \boldsymbol {D} _ {\mathrm {p}} ^ {i} - \lambda_ {\mathrm {d}} ^ {g} \boldsymbol {D} _ {d} ^ {i, g} - \beta_ {\mathrm {d}} ^ {g} \right\| _ {1}, \tag {5} +$$ + +where $\lambda_{\mathrm{d}}^{g}$ and $\beta_{\mathrm{d}}^{g}$ are optimized scale and shift parameters. + +Finally, the ray maps $\pmb{r}$ also encode camera pose. To align them with the global camera parameters $(\mathbf{R}_{\mathrm{p}},\mathbf{K}_{\mathrm{p}},\boldsymbol{o}_{\mathrm{p}})$ obtained from the point map, we first solve an optimization problem to extract the camera parameters from the ray map $\pmb{r}^{i,g} = \langle \pmb{d}^{i,g},\pmb{m}^{i,g}\rangle$ for each group $g$ at frame $i$ . Following Ray Diffusion [114], the camera center $\pmb{o}_{\mathrm{c}}^{i,g}$ is solved by finding the 3D world coordinate closest to the intersection of all rays: + +$$ +\boldsymbol {o} _ {\mathrm {c}} ^ {i, g} = \arg \min _ {\boldsymbol {p} \in \mathbb {R} ^ {3}} \sum_ {u \in H, v \in W} \| \boldsymbol {p} \times \boldsymbol {d} _ {u v} ^ {i, g} - \boldsymbol {m} _ {u v} ^ {i, g} \| ^ {2}. 
\tag {6} +$$ + +The camera extrinsics are solved by optimizing for the matrix $\mathbf{H}$ that transforms the predicted per-pixel ray directions $d_{uv}^{i,g}$ to the ray directions $\mathbf{u}_{uv}$ of a canonical camera: + +$$ +\mathbf {H} ^ {i, g} = \underset {\| \mathbf {H} \| = 1} {\arg \min } \sum_ {u \in H, v \in W} \left\| \mathbf {H} d _ {u v} ^ {i, g} \times \mathbf {u} _ {u v} \right\|. \tag {7} +$$ + +Then the world-to-camera rotation matrix $\mathbf{R}_c^{i,g}$ and intrinsic matrix $\mathbf{K}_c^{i,g}$ can be solved using the RQ-decomposition of $\mathbf{H}^{i,g}$ . Finally, the camera trajectory alignment loss is: + +$$ +\begin{array}{l} \mathcal {L} _ {\mathrm {c}} \left(\mathbf {R} _ {\mathrm {p}}, \boldsymbol {o} _ {\mathrm {p}}, \mathbf {R} _ {\mathrm {c}} ^ {g}, \beta_ {\mathrm {c}} ^ {g}, \lambda_ {\mathrm {c}} ^ {g}\right) = \sum_ {g \in \mathcal {G}} \sum_ {i \in g} \left(\left\| \mathbf {R} _ {\mathrm {p}} ^ {i ^ {\top}} \mathbf {R} _ {\mathrm {c}} ^ {g} \mathbf {R} _ {\mathrm {c}} ^ {i, g} - \boldsymbol {I} \right\| _ {\mathrm {f}} \right. \\ \left. + \left\| \lambda_ {\mathrm {c}} ^ {g} \boldsymbol {o} _ {\mathrm {c}} ^ {i, g} + \beta_ {\mathrm {c}} ^ {g} - \boldsymbol {o} _ {\mathrm {p}} ^ {i} \right\| _ {2}\right), \tag {8} \\ \end{array} +$$ + +where $R_{\mathrm{c}}^{g}, \beta_{\mathrm{c}}^{g}, \lambda_{\mathrm{c}}^{g}$ are learnable group-wise rotation matrix, translation vector, and scale, respectively, to align the global camera trajectory $(\mathbf{R}_p, \mathbf{o}_p)$ and the predicted ones $(\mathbf{R}_c, \mathbf{o}_c)$ . 
Following MonST3R [113], we also use a loss to smooth the camera trajectory: + +$$ +\mathcal {L} _ {\mathrm {s}} \left(\mathbf {R} _ {\mathrm {p}}, \boldsymbol {o} _ {p}\right) = \sum_ {i = 1} ^ {N} \left(\left\| \mathbf {R} _ {\mathrm {p}} ^ {i ^ {\top}} \mathbf {R} _ {\mathrm {p}} ^ {i + 1} - \boldsymbol {I} \right\| _ {\mathrm {f}} + \left\| \boldsymbol {o} _ {\mathrm {p}} ^ {i + 1} - \boldsymbol {o} _ {\mathrm {p}} ^ {i} \right\| _ {2}\right). \tag {9} +$$ + +The final optimization objective is the weighted combination of the losses above: + +$$ +\mathcal {L} _ {\text {a l l}} = \alpha_ {1} \mathcal {L} _ {\mathrm {p}} + \alpha_ {2} \mathcal {L} _ {\mathrm {d}} + \alpha_ {3} \mathcal {L} _ {\mathrm {c}} + \alpha_ {4} \mathcal {L} _ {\mathrm {s}}. \tag {10} +$$ + +A note on the invariants. The model predicts point maps, disparity maps, and ray map origins up to scale, as this cannot be uniquely determined from a monocular video. The disparity map is also recovered up to a translation, which discounts the focal length (this is sometimes difficult to estimate due to the dolly zoom effect). Likewise, the ray map origin is recovered up to a shift, necessary to allow normalizing these maps. + +
| Category | Method | Sintel [5] Abs Rel ↓ | Sintel [5] δ < 1.25 ↑ | Bonn [55] Abs Rel ↓ | Bonn [55] δ < 1.25 ↑ | KITTI [17] Abs Rel ↓ | KITTI [17] δ < 1.25 ↑ |
|---|---|---|---|---|---|---|---|
| Single-frame depth | Marigold [28] | 0.532 | 51.5 | 0.091 | 93.1 | 0.149 | 79.6 |
| Single-frame depth | Depth-Anything-V2 [106] | 0.367 | 55.4 | 0.106 | 92.1 | 0.140 | 80.4 |
| Video depth | NVDS [95] | 0.408 | 48.3 | 0.167 | 76.6 | 0.253 | 58.8 |
| Video depth | ChronoDepth [70] | 0.687 | 48.6 | 0.100 | 91.1 | 0.167 | 75.9 |
| Video depth | DepthCrafter* [22] | 0.270 | 69.7 | 0.071 | 97.2 | 0.104 | 89.6 |
| Video depth & Camera pose | Robust-CVD [32] | 0.703 | 47.8 | — | — | — | — |
| Video depth & Camera pose | CasualSAM [116] | 0.387 | 54.7 | 0.169 | 73.7 | 0.246 | 62.2 |
| Video depth & Camera pose | MonST3R [113] | 0.335 | 58.5 | 0.063 | 96.4 | 0.104 | 89.5 |
| Video depth & Camera pose | Geo4D (Ours) | 0.205 | 73.5 | 0.059 | 97.2 | 0.086 | 93.7 |
+ +Table 1. Video depth estimation on Sintel [5], Bonn [55] and KITTI [17] datasets. We follow the evaluation protocols established in recent MonST3R [113] for a fair comparison. Notably, results for DepthCrafter* are reported from its latest version (v1.0.1). The Best and the second best results are highlighted. + +# 4. Experiments + +# 4.1. Experimental Settings + +Training datasets. Geo4D is trained exclusively on synthetic datasets, yet demonstrates strong generalization to real-world videos. Specifically, we use five synthetic datasets for training: Spring [50], BEDLAM [2], PointOdyssey [119], TarTanAir [93], and VirtualKitti [6]. See the Supp. Mat Tab. 5 for details. + +Training. Our Geo4D is initialized with the weights of DynamiCrafter [102] and trained using AdamW [48] with a learning rate of $1 \times 10^{-5}$ and a batch size of 32. We use a progressive training strategy to improve convergence and stability. First, we train the model to generate a single geometric modality, i.e., the point maps, at a fixed resolution of $512 \times 320$ . Next, we introduce a multi-resolution training scheme to improve generalization and robustness, which includes various resolutions: $512 \times 384$ , $512 \times 320$ , $576 \times 256$ , $640 \times 192$ . Finally, we progressively add additional geometric modalities, i.e., the ray and depth maps. Training is conducted on 4 NVIDIA H100 GPUs with a total training time of approximately one week. + +Inference. As described in Sec. 3.2, given an $N$ -frame video as input, we first split it into overlapping clips $\mathcal{G}$ , each containing $V = 16$ frames, with a stride of $s = 4$ . Each video clip is encoded and fed to the diffusion model to sample multi-modal 4D parameters $(X^{i,g}, D^{i,g}, r^{i,g})$ for the video. For sampling, we use DDIM [77] with 5 steps. Finally, the alignment algorithm in Sec. 3.2 is used to fuse the clips into a globally coherent 4D reconstruction of the entire video. + +# 4.2. 
Video Depth Estimation + +Testing data. Our hypothesis is that, despite being trained on synthetic data, our model can generalize well to out-of-distribution synthetic and real data, as it is based on a pre-trained video diffusion model. To test this hypothe + +sis, we evaluate our model on three benchmarks: Sintel [5] is a synthetic dataset that provides accurate depth annotations, covering diverse scenes with complex camera motion. KITTI [17] is a large driving dataset collected using stereo cameras and LiDAR sensors. Bonn [55] focuses on dynamic indoor scenes. To ensure fair comparisons, we follow the evaluation protocol used by MonST3R [113], where depth sequences are uniformly sampled from the datasets, extracting 50-110 frames per sequence for evaluation. + +Metrics. Following the standard affine-invariant depth evaluation protocol [65], we align the predicted video depth with the ground-truth depth before computing metrics. However, unlike single-image depth estimation [28, 105, 106], where depth alignment is performed per frame, we enforce global scale consistency by applying a single scale and shift across the entire video sequence. For quantitative evaluation, we adopt two widely used depth metrics: absolute relative error (Abs Rel) and the percentage of inlier points (with a threshold value of $\delta < 1.25$ ). + +Baselines. We compare Geo4D to state-of-the-art single-frame depth estimation methods (Marigold [28] and Depth-Anything-V2 [106]), video depth prediction (NVDS [95], ChronoDepth [70], and DepthCrafter [22]), and joint video depth and camera pose prediction (Robust-CVD [32], CausalSAM [116], and MonST3R [113]). + +Results. As shown in Table 1, all versions of Geo4D outperform state-of-the-art methods by a large margin. This includes DepthCrafter [22] and MonST3R [113], the most recent video depth diffusion model and the dynamic extension of DUSt3R to dynamic scenes, respectively. 
Notably, while both Geo4D and DepthCrafter are based on the same video diffusion model (DynamiCrafter), our model outperforms DepthCrafter in Abs Rel by $24.0\%$ on Sintel and $17.3\%$ on KITTI, despite solving a more general problem. Qualitatively, Fig. 3 shows that Geo4D achieves more consistent results, especially for fast-moving objects. + +![](images/31fe055d0b9532f244e4df7357114bae28839107d39783d32cc77d5fe458cccd.jpg) +Figure 3. Qualitative results comparing Geo4D with MonST3R [113]. Attributed to our group-wise inference manner and prior geometry knowledge from pretrained video diffusion, our model successfully produces consistent 4D geometry under fast motion (first row) and deceptive reflection in the water (second row). + +![](images/da603b61e692742b8f3125a2a34baeb6c0ea078414c151332d08fac574719ceb.jpg) + +![](images/6b6c4f3a7661332f8da800776b38b53b84bf480635decdc6034a431285270ef8.jpg) + +![](images/f70a66213219ec9125bce35fe499bbcd826887e216833dca0c34aec4136a16f0.jpg) + +![](images/73b2bb4a75fa350981437555d26a2196695e4f249a9c9c3f725920b7e5eed31f.jpg) + +# 4.3. Camera Pose Estimation + +Setup. We evaluate the performance of Geo4D on both the synthetic Sintel [5] dataset and the realistic TUM-dynamics [78] dataset. We follow the same evaluation protocol as in MonST3R [113]. Specifically, on Sintel, we select 14 dynamic sequences, and for TUM-dynamics, we sample the first 90 frames of each sequence with a temporal stride of 3. After aligning the predicted camera trajectory with the ground truth using the Umayama algorithm, we calculate three commonly used metrics: Absolute Translation Error (ATE), Relative Translation Error (RPE-T), and Relative Rotation Error (RPE-R). We compare our method with other state-of-the-art discriminative methods, which jointly predict camera pose and depth, including Robust-CVD [32], CausalSAM [116], and MonST3R [113]. + +Results. 
To the best of our knowledge, Geo4D is the first method that uses a generative model to estimate camera parameters in a dynamic scene. As shown in Tab. 2, compared to existing non-generative alternatives, we achieve much better camera rotation prediction (RPE-R) and comparable camera translation prediction (ATE and RPE-T). + +# 4.4. Qualitative Comparison + +4D reconstruction. We compare Geo4D with the state-of-the-art MonST3R method on the DAVIS [23] dataset. Up- + +
| Method | Sintel ATE ↓ | Sintel RPE-T ↓ | Sintel RPE-R ↓ | TUM-dynamics ATE ↓ | TUM-dynamics RPE-T ↓ | TUM-dynamics RPE-R ↓ |
|---|---|---|---|---|---|---|
| Robust-CVD [32] | 0.360 | 0.154 | 3.443 | 0.153 | 0.026 | 3.528 |
| CasualSAM [116] | 0.141 | 0.035 | 0.615 | 0.071 | 0.010 | 1.712 |
| MonST3R [113] | 0.108 | 0.042 | 0.732 | 0.063 | 0.009 | 1.217 |
| Geo4D (Ours) | 0.185 | 0.063 | 0.547 | 0.073 | 0.020 | 0.635 |
+ +Table 2. Quantitative evaluation for camera pose estimation. We achieve comparable camera pose estimation performance with other discriminative SOTA methods. + +grading from pairwise alignment as in MonST3R to our group-wise alignment improves temporal consistency, leading to a more stable and globally coherent 4D reconstruction of point maps and camera trajectory, particularly in highly dynamic scenes. As shown in the top row of Fig. 3, Geo4D successfully tracks the racing car in 4D, whereas MonST3R struggles due to the rapid motion between pairs of images. Furthermore, likely due to the strong prior captured by the pre-trained video generative model, Geo4D correctly reconstructs the reflection of the flamingo in the water (second row in Fig. 3), whereas MonST3R misinterprets the reflection as a foreground object, resulting in incorrect depth. + +Video depth prediction. We compare Geo4D with state-of-the-art video depth predictors MonST3R [113] and DepthCrafter [22] on the Sintel [5] dataset. Qualitatively, Geo4D produces more detailed geometry, for instance for + +![](images/161ac3a2e22555c8828abbf3af34a702e6e0dbcb916f70948abb49701c679c34.jpg) + +![](images/59fd5169451397ccf3bd7880e7258023dc7f0fc38ae0a0fd7f3567726904a1cf.jpg) +Figure 4. Qualitative video depth results comparing Geo4D with MonST3R [113] and DepthCrafter [22]. Owing to our proposed multimodal training and alignment, as well as the prior knowledge from diffusion, our method can infer a more detailed structure (first row) and a more accurate spatial arrangement from video (second row). +Table 3. Ablation study for the different modalities of the geometric representation on the Sintel [5] dataset. We demonstrate the effectiveness of our key design choices that both leverage multi-modality as additional training supervision signal and postprocess through our proposed multi-modal alignment algorithm will improve the overall performance. + +
| Stride | s / frame | Video Depth Abs Rel ↓ | Video Depth δ < 1.25 ↑ | Camera Pose ATE ↓ | Camera Pose RPE trans ↓ | Camera Pose RPE rot ↓ |
|---|---|---|---|---|---|---|
| 15 | 0.92 | 0.213 | 72.4 | 0.210 | 0.092 | 0.574 |
| 8 | 1.24 | 0.212 | 72.8 | 0.222 | 0.074 | 0.524 |
| 4 | 1.89 | 0.205 | 73.5 | 0.185 | 0.063 | 0.547 |
| 2 | 3.26 | 0.204 | 72.9 | 0.181 | 0.058 | 0.518 |
+ +Table 4. Ablation study for the temporal sliding window stride on the Sintel [5] dataset. There is a trade-off between performance and inference speed. + +the rope on the stick in the first row of Fig. 4, and a better spatial arrangement between different dynamic objects, as shown in the second row of Fig. 4. + +# 4.5. Ablation Study + +We ablate our key design choices and the effect of different modalities on the Sintel dataset. + +We study the effect of multi-modality in Tab. 3. The three modalities—point map, disparity map, and ray map—can be used either at training or inference time, or both. The first two rows show that the diffusion model trained with point maps as a single modality performs worse in both video depth and camera pose estimation than the diffusion model trained with all three modalities. Therefore, the other two modalities, even if they can be seen as redundant, serve as additional supervisory signals during training, which improves the generalization ability of the diffusion model. + +We then investigate the effectiveness of our multi-modal alignment algorithm. Compared with the second to the fourth row in Tab. 3, which leverage only a single modality during inference, multi-modal alignment optimization (last row) achieves the best performance, showing the benefits of fusing the multiple modalities at inference time. + +We ablate the sliding window stride in Tab. 4. Results improve with a shorter stride, in part because this means that more windows and estimates are averaged, reducing the variance of the predictions by the denoising diffusion model, which is stochastic. We choose stride $s = 4$ for our main results to balance runtime and performance. Note that MonST3R [113] requires 2.41 seconds to process one frame under the same setting, so our method is 1.27 times faster than MonST3R [113]. + +# 5. Discussion and Conclusion + +We have introduced Geo4D, a novel approach that adapts a video generator for dynamic 4D reconstruction. 
By building on a pre-trained video generator, Geo4D achieves excellent generalization to real data despite being trained only on synthetic 4D data. We have also demonstrated the benefits of predicting multiple modalities and fusing them at test time via optimization. Our model outperforms state-of-the-art methods on video depth and camera rotation prediction, particularly in challenging dynamic scenes. + +Despite these successes, our approach has limitations. One is that the point map encoder-decoder is still not entirely accurate, which in turn is a bottleneck for the overall reconstruction quality. + +Our approach also opens a path to integrating 4D geometry into video foundation models, e.g., to generate 3D animations from text, or to provide a more actionable signal when the video model is used as a proxy for a world model. + +Acknowledgments. The authors of this work were supported by Clarendon Scholarship, ERC 101001212-UNION, and EPSRC EP/Z001811/1 SYN3D. + +# References + +[1] Anas Awadalla, Irena Gao, Josh Gardner, Jack Hessel, Yusuf Hanafy, Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Shiori Sagawa, Jenia Jitsev, Simon Kornblith, Pang Wei Koh, Gabriel Ilharco, Mitchell Wortman, and Ludwig Schmidt. Openflamingo: An opensource framework for training large autoregressive vision-language models, 2023. 5 +[2] Michael J. Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. Bedlam: A synthetic dataset of bodies exhibiting detailed lifelike animated motion, 2023. 6, 15 +[3] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 3 +[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 22563-22575, 2023. 3 +[5] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. Black. A naturalistic open source movie for optical flow evaluation. In European Conference on Computer Vision (ECCV), 2012. 6, 7, 8, 15, 16 +[6] Yohann Cabon, Naila Murray, and Martin Humenberger. Virtual kitti 2, 2020. 6, 15 +[7] David Charatan, Sizhe Lester Li, Andrea Tagliasacchi, and Vincent Sitzmann. pixelsplat: 3d gaussian splats from image pairs for scalable generalizable 3d reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19457-19467, 2024. 2 +[8] Xingyu Chen, Yue Chen, Yuliang Xiu, Andreas Geiger, and Anpei Chen. Easi3r: Estimating disentangled motion from dust3r without training. arXiv preprint arXiv:2503.24391, 2025. 3 +[9] Yuedong Chen, Haofei Xu, Chuanxia Zheng, Bohan Zhuang, Marc Pollefeys, Andreas Geiger, Tat-Jen Cham, and Jianfei Cai. MVSplat: efficient 3d gaussian splattering from sparse multi-view images. arXiv, 2403.14627, 2024. 2 +[10] Yuedong Chen, Chuanxia Zheng, Haofei Xu, Bohan Zhuang, Andrea Vedaldi, Tat-Jen Cham, and Jianfei Cai. Mvsplat360: Feed-forward 360 scene synthesis from sparse views. In Neural Information Processing Systems (NeurIPS), 2024. 3 +[11] Christopher B Choy, Danfei Xu, JunYoung Gwak, Kevin Chen, and Silvio Savarese. 3d-r2n2: A unified approach for single and multi-view 3d object reconstruction. In European conference on computer vision (ECCV), pages 628-644. Springer, 2016. 2 +[12] Wen-Hsuan Chu, Lei Ke, and Katerina Fragkiadaki. Dreamscene4d: Dynamic multi-object scene generation from monocular videos. Advances in Neural Information Processing Systems (NeurIPS), 2024. 2 + +[13] Yiquan Duan, Xianda Guo, and Zheng Zhu. Diffusion-depth: Diffusion denoising approach for monocular depth estimation. In European Conference on Computer Vision (ECCV), pages 432-449. 
Springer, 2024. 3 +[14] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. In European Conference on Computer Vision (ECCV), pages 241-258. Springer, 2024. 3, 4 +[15] Ruiqi Gao, Aleksander Holynski, Philipp Henzler, Arthur Brussee, Ricardo Martin-Brualla, Pratul Srinivasan, Jonathan T Barron, and Ben Poole. Cat3d: Create anything in 3d with multi-view diffusion models. Advances in Neural Information Processing Systems (NeurIPS), 2024. 3 +[16] Songwei Ge, Seungjun Nah, Guilin Liu, Tyler Poon, Andrew Tao, Bryan Catanzaro, David Jacobs, Jia-Bin Huang, Ming-Yu Liu, and Yogesh Balaji. Preserve your own correlation: A noise prior for video diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 22930-22941, 2023. 3 +[17] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The KITTI dataset. International Journal of Robotics Research (IJRR), 2013. 6 +[18] Georgia Gkioxari, Jitendra Malik, and Justin Johnson. Mesh r-cnn. In Proceedings of the IEEE/CVF international conference on computer vision (CVPR), pages 9785-9795, 2019. 2 +[19] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. In International Conference on Learning Representations (ICLR), 2024. 3 +[20] Junlin Han, Filippos Kokkinos, and Philip Torr. Vfusion3d: Learning scalable 3d generative models from video diffusion models. In European Conference on Computer Vision (ECCV), pages 333-350. Springer, 2024. 3 +[21] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. Neural Information Processing Systems (NeurIPS), 35:8633-8646, 2022. 
3 +[22] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 2, 6, 7, 8, 15 +[23] Jia-Bin Huang, Sing Bing Kang, Narendra Ahuja, and Johannes Kopf. Temporally coherent completion of dynamic video. In ACM, 2016. 2, 7 +[24] Matthias Innmann, Michael Zollhöfer, Matthias Nießner, Christian Theobalt, and Marc Stamminger. Volumedeform: Real-time volumetric non-rigid reconstruction. In European conference on computer vision (ECCV), pages 362-379. Springer, 2016. 2 +[25] Tomas Jakab, Ruining Li, Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Farm3d: Learning articulated 3d animals by distilling 2d diffusion. In 2024 International + +Conference on 3D Vision (3DV), pages 852-861. IEEE, 2024. 3 +[26] Yanqin Jiang, Chaohui Yu, Chenjie Cao, Fan Wang, Weiming Hu, and Jin Gao. *Animate3d: Animating any 3d model with multi-view video diffusion*. Advances in Neural Information Processing Systems (NeurIPS), 2024. 3 +[27] Zeren Jiang, Chen Guo, Manuel Kaufmann, Tianjian Jiang, Julien Valentin, Otmar Hilliges, and Jie Song. Multiply: Reconstruction of multiple people from monocular video in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2 +[28] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9492-9502, 2024. 2, 3, 4, 6 +[29] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4):139-1, 2023. 2, 3 +[30] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. 
Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything, 2023. 3 +[31] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 3 +[32] Johannes Kopf, Xuejian Rong, and Jia-Bin Huang. Robust consistent video depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1611-1621, 2021. 6, 7 +[33] Akshay Krishnan, Xinchen Yan, Vincent Casser, and Abhijit Kundu. Orchid: Image latent diffusion for joint appearance and geometry generation. arXiv preprint arXiv:2501.13087, 2025. 3, 4 +[34] Jiahui Lei, Yijia Weng, Adam Harley, Leonidas Guibas, and Kostas Daniilidis. Mosca: Dynamic gaussian fusion from casual videos via 4d motion scaffolds. arXiv preprint arXiv:2405.17421, 2024. 2 +[35] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. In European Conference on Computer Vision, pages 71-91. Springer, 2024. 2 +[36] Jiahao Li, Hao Tan, Kai Zhang, Zexiang Xu, Fujun Luan, Yinghao Xu, Yicong Hong, Kalyan Sunkavalli, Greg Shakhnarovich, and Sai Bi. Instant3D: Fast text-to-3D with sparse-view generation and large reconstruction model. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 3 +[37] Xuanyi Li, Daquan Zhou, Chenxu Zhang, Shaodong Wei, Qibin Hou, and Ming-Ming Cheng. Sora generates videos with stunning geometrical consistency. arXiv, 2402.17403, 2024. 2 + +[38] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 2 +[39] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4273-4284, 2023. 2 +[40] Zhengqi Li, Richard Tucker, Forrester Cole, Qianqian Wang, Linyi Jin, Vickie Ye, Angjoo Kanazawa, Aleksander Holynski, and Noah Snavely. Megasam: Accurate, fast and robust structure and motion from casual dynamic videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3 +[41] Chen-Hsuan Lin, Chen Kong, and Simon Lucey. Learning efficient point cloud generation for dense 3d object reconstruction. In proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2018. 2 +[42] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 300–309, 2023. 3 +[43] Youtian Lin, Zuozhuo Dai, Siyu Zhu, and Yao Yao. Gaussian-flow: 4d reconstruction with dynamic 3d gaussian particle. In CVPR, 2024. 2 +[44] Fangfu Liu, Wenqiang Sun, Hanyang Wang, Yikai Wang, Haowen Sun, Junliang Ye, Jun Zhang, and Yueqi Duan. Reconx: Reconstruct any scene from sparse views with video diffusion model, 2024. 3 +[45] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), pages 9298–9309, 2023. 3 +[46] Yuan Liu, Cheng Lin, Zijiao Zeng, Xiaoxiao Long, Lingjie Liu, Taku Komura, and Wenping Wang. Syncdreamer: Generating multiview-consistent images from a single-view image. In The Twelfth International Conference on Learning Representations (ICLR), 2024. +[47] Xiaoxiao Long, Yuan-Chen Guo, Cheng Lin, Yuan Liu, Zhiyang Dou, Lingjie Liu, Yuexin Ma, Song-Hai Zhang, Marc Habermann, Christian Theobalt, et al. 
Wonder3d: Single image to 3d using cross-domain diffusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 9970-9980, 2024. 3 +[48] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In ICLR, 2019. 6 +[49] Yuanxun Lu, Jingyang Zhang, Tian Fang, Jean-Daniel Nahmias, Yanghai Tsin, Long Quan, Xun Cao, Yao Yao, and Shiwei Li. Matrix3d: Large photogrammetry model all-in-one. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3, 4 + +[50] Lukas Mehl, Jenny Schmalfuss, Azin Jahedi, Yaroslava Nalivayko, and Andres Bruhn. Spring: A high-resolution high-detail dataset and benchmark for scene flow, optical flow and stereo. In Proc. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 6, 15 +[51] Luke Melas-Kyriazi, Iro Laina, Christian Rupprecht, and Andrea Vedaldi. Realfusion: 360deg reconstruction of any object from a single image. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 8446-8455, 2023. 3 +[52] B Mildenhall, PP Srinivasan, M Tancik, JT Barron, R Ramamoorthi, and R Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision (ECCV), 2020. 2 +[53] Richard A Newcombe, Dieter Fox, and Steven M Seitz. Dynamicfusion: Reconstruction and tracking of non-rigid scenes in real-time. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 343-352, 2015. 
2 +[54] NVIDIA, Niket Agarwal, Arslan Ali, Maciej Bala, Yogesh Balaji, Erik Barker, Tiffany Cai, Prithvijit Chattopadhyay, Yongxin Chen, Yin Cui, Yifan Ding, Daniel Dworakowski, Jiaojiao Fan, Michele Fenzi, Francesco Ferroni, Sanja Fidler, Dieter Fox, Songwei Ge, Yunhao Ge, Jinwei Gu, Siddharth Gururani, Ethan He, Jiahui Huang, Jacob Huffman, Pooya Jannaty, Jingyi Jin, Seung Wook Kim, Gergely Klár, Grace Lam, Shiyi Lan, Laura Leal-Taixe, Anqi Li, Zhaoshuo Li, Chen-Hsuan Lin, Tsung-Yi Lin, Huan Ling, Ming-Yu Liu, Xian Liu, Alice Luo, Qianli Ma, Hanzi Mao, Kaichun Mo, Arsalan Mousavian, Seungjun Nah, Sriharsha Niverty, David Page, Despoina Paschalidou, Zeeshan Patel, Lindsey Pavao, Morteza Ramezanali, Fitsum Reda, Xiaowei Ren, Vasanth Rao Naik Sabavat, Ed Schmerling, Stella Shi, Bartosz Stefaniak, Shitao Tang, Lyne Tchapmi, Przemek Tredak, Wei-Cheng Tseng, Jibin Varghese, Hao Wang, Haoxiang Wang, Heng Wang, Ting-Chun Wang, Fangyin Wei, Xinyue Wei, Jay Zhangjie Wu, Jiashu Xu, Wei Yang, Lin Yen-Chen, Xiaohui Zeng, Yu Zeng, Jing Zhang, Qinsheng Zhang, Yuxuan Zhang, Qingqing Zhao and Artur Zolkowski. Cosmos world foundation model platform for physical ai. arXiv, 2501.03575, 2025. 2 +[55] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguère, and C. Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 7855-7862, 2019. 6 +[56] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2 +[57] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 2 + +[58] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: a higher-dimensional representation for topologically varying neural radiance fields. ACM Transactions on Graphics (TOG), 40 (6):1-12, 2021. 2 +[59] Jack Parker-Holder, Philip Ball, Jake Bruce, Vibhavari Dasagi, Kristian Holsheimer, Christos Kaplanis, Alexandre Moufarek, Guy Scully, Jeremy Shar, Jimmy Shi, Stephen Spencer, Jessica Yung, Michael Dennis, Sultan Kenjoyev, Shangbang Long, Vlad Mnih, Harris Chan, Maxime Gazeau, Bonnie Li, Fabio Pardo, Luyu Wang, Lei Zhang, Frederic Besse, Tim Harley, Anna Mitenkova, Jane Wang, Jeff Clune, Demis Hassabis, Raia Hadsell, Adrian Bolton, Satinder Singh, and Tim Rocktäschel. Genie 2: A large-scale foundation world model, 2024. 2 +[60] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European conference on computer vision (ECCV), pages 523-540. Springer, 2020. 2 +[61] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 3 +[62] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10318-10327, 2021. 2 +[63] Lingteng Qiu, Guanying Chen, Xiaodong Gu, Qi Zuo, Mutian Xu, Yushuang Wu, Weihao Yuan, Zilong Dong, Liefeng Bo, and Xiaoguang Han. Richdreamer: A generalizable normal-depth diffusion model for detail richness in text-to-3d. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 9914–9925, 2024. 
3 +[64] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning (ICML), pages 8748-8763. PmLR, 2021. 5 +[65] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 44:1623-1637, 2019. 6 +[66] Jiawei Ren, Kevin Xie, Ashkan Mirzaei, Hanxue Liang, Xiaohui Zeng, Karsten Kreis, Ziwei Liu, Antonio Torralba, Sanja Fidler, Seung Wook Kim, and Huan Ling. L4gm: Large 4d gaussian reconstruction model. Advances in Neural Information Processing Systems (NeurIPS), 2024. 3 +[67] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 10684-10695, 2022. 4 + +[68] Kyle Sargent, Zizhang Li, Tanmay Shah, Charles Herrmann, Hong-Xing Yu, Yunzhi Zhang, Eric Ryan Chan, Dmitry Lagun, Li Fei-Fei, Deqing Sun, et al. Zeronvs: Zero-shot 360-degree view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9420–9429, 2024. 3 +[69] Saurabh Saxena, Charles Herrmann, Junhwa Hur, Abhishek Kar, Mohammad Norouzi, Deqing Sun, and David J Fleet. The surprising effectiveness of diffusion models for optical flow and monocular depth estimation. Advances in Neural Information Processing Systems (NeurIPS), 36:39443-39469, 2023. 3 +[70] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. arXiv, 2406.01493, 2024. 
6 +[71] Yichun Shi, Peng Wang, Jianglong Ye, Long Mai, Kejie Li, and Xiao Yang. MVDream: Multi-view diffusion for 3d generation. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 3 +[72] Yawar Siddiqui, Antonio Alliegro, Alexey Artemov, Tatiana Tommasi, Daniele Sirigatti, Vladislav Rosov, Angela Dai, and Matthias Nießner. Meshgpt: Generating triangle meshes with decoder-only transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19615-19625, 2024. 2 +[73] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, Devi Parikh, Sonal Gupta, and Yaniv Taigman. Make-a-video: Text-to-video generation without text-video data. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 3 +[74] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhofer. Deepvoxels: Learning persistent 3d feature embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2437-2446, 2019. 2 +[75] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems (NeurIPS), 34:19313-19325, 2021. 2, 4 +[76] Brandon Smart, Chuanxia Zheng, Iro Laina, and Victor Adrian Prisacariu. Splatt3r: Zero-shot gaussian splatting from uncalibrated image pairs. arXiv preprint arXiv:2408.13912, 2024. 2 +[77] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models, 2022. 6 +[78] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of rgb-d slam systems. 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 573-580, 2012. 
7 +[79] Stanislaw Szymanowicz, Christian Rupprecht, and Andrea Vedaldi. Splatter Image: Ultra-fast single-view 3D recon + +struction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2 +[80] Stanislaw Szymanowicz, Eldar Insafutdinov, Chuanxia Zheng, Dylan Campbell, João F. Henriques, Christian Rupprecht, and Andrea Vedaldi. Flash3D: Feed-forward generalisable 3D scene reconstruction from a single image. In Proceedings of the International Conference on 3D Vision (3DV), 2025. 2 +[81] Stanislaw Szymanowicz, Jason Y Zhang, Pratul Srinivasan, Ruiqi Gao, Arthur Brussee, Aleksander Holynski, Ricardo Martin-Brualla, Jonathan T Barron, and Philipp Henzler. Bolt3d: Generating 3d scenes in seconds. arXiv preprint arXiv:2503.14445, 2025. 3 +[82] Aether Team, Haoyi Zhu, Yifan Wang, Jianjun Zhou, Wenzheng Chang, Yang Zhou, Zizun Li, Junyi Chen, Chunhua Shen, Jiangmiao Pang, and Tong He. Aether: Geometric-aware unified world modeling. arXiv preprint arXiv:2503.18945, 2025. 3 +[83] Shubham Tulsiani, Tinghui Zhou, Alexei A Efros, and Jitendra Malik. Multi-view supervision for single-view reconstruction via differentiable ray consistency. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 2626-2634, 2017. 2 +[84] S. Umeyama. Least-squares estimation of transformation parameters between two point patterns. IEEE Transactions on Pattern Analysis and Machine Intelligence, 13(4):376-380, 1991. 15 +[85] Vikram Voleti, Chun-Han Yao, Mark Boss, Adam Letts, David Pankratz, Dmitry Tochilkin, Christian Laforte, Robin Rombach, and Varun Jampani. Sv3d: Novel multi-view synthesis and 3d generation from a single image using latent video diffusion. In European Conference on Computer Vision (ECCV), pages 439-457. Springer, 2024. 3 +[86] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. In International Conference on 3D Vision (3DV), 2024. 
2 +[87] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 12619-12629, 2023. 3 +[88] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 3 +[89] Jianyuan Wang, Minghao Chen, Nikita Karaev, Andrea Vedaldi, Christian Rupprecht, and David Novotny. VGGT: Visual geometry grounded network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 1, 2 +[90] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In Proceedings of the European conference on computer vision (ECCV), pages 52-67, 2018. 2 +[91] Qianqian Wang, Vickie Ye, Hang Gao, Weijia Zeng, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of + +motion: 4d reconstruction from a single video. In arXiv preprint arXiv:2407.13764, 2024. 2 +[92] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. Dust3r: Geometric 3d vision made easy. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20697-20709, 2024. 1, 2, 3, 4 +[93] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian A. Scherer. Tartanair: A dataset to push the limits of visual slam. 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 4909-4916, 2020. 6, 15 +[94] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems (NeurIPS), 36:7594-7611, 2023. 
3 +[95] Yiran Wang, Min Shi, Jiaqi Li, Zihao Huang, Zhiguo Cao, Jianming Zhang, Ke Xian, and Guosheng Lin. Neural video depth stabilizer. In ICCV, 2023. 6 +[96] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. Advances in Neural Information Processing Systems (NeurIPS), 36, 2024. 3 +[97] Daniel Watson, William Chan, Ricardo Martin Brulla, Jonathan Ho, Andrea Tagliasacchi, and Mohammad Norouzi. Novel view synthesis with diffusion models. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 4 +[98] Philippe Weinzaepfel, Vincent Leroy, Thomas Lucas, Romain BRÉGIER, Yohann Cabon, Vaibhav ARORA, Leonid Antsfeld, Boris Chidlovskii, Gabriela Csurka, and Jerome Revaud. CroCo: self-supervised pre-training for 3D vision tasks by cross-view completion. In Proc. NeurIPS, 2022. 2 +[99] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4d gaussian splatting for real-time dynamic scene rendering. In CVPR, 2024. 2 +[100] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 1-10, 2020. 4 +[101] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 3 +[102] Jinbo Xing, Menghan Xia, Yong Zhang, Haoxin Chen, Wangbo Yu, Hanyuan Liu, Gongye Liu, Xintao Wang, Ying Shan, and Tien-Tsin Wong. Dynamiccafter: Animating open-domain images with video diffusion priors. In European Conference on Computer Vision (ECCV), pages 399-417. Springer, 2024. 
1, 3, 4, 6 + +[103] Tian-Xing Xu, Xiangjun Gao, Wenbo Hu, Xiaoyu Li, Song-Hai Zhang, and Ying Shan. Geometrycrafter: Consistent geometry estimation for open-world videos with diffusion priors, 2025. 3 +[104] Jianing Yang, Alexander Sax, Kevin J. Liang, Mikael Henaff, Hao Tang, Ang Cao, Joyce Chai, Franziska Meier, and Matt Feiszli. Fast3r: Towards 3d reconstruction of $1000+$ images in one forward pass. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 2 +[105] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Ji-ashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proc. CVPR, 2024. 6 +[106] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. Advances in Neural Information Processing Systems (NeurIPS), 2024. 6 +[107] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20331-20341, 2024. 2 +[108] David Yifan Yao, Albert J. Zhai, and Shenlong Wang. Uni4d: Unifying visual foundation models for 4d modeling from a single video, 2025. 3 +[109] Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048, 2024.3 +[110] Xumin Yu, Yongming Rao, Ziyi Wang, Zuyan Liu, Jiwen Lu, and Jie Zhou. Pointr: Diverse point cloud completion with geometry-aware transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12498-12507, 2021. 2 +[111] Yuheng Yuan, Qiuhong Shen, Xingyi Yang, and Xinchao Wang. 1000+ fps 4d gaussian splatting for dynamic scene rendering, 2025. 
2 +[112] David Junhao Zhang, Jay Zhangjie Wu, Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou. Show-1: Marrying pixel and latent diffusion models for text-to-video generation. International Journal of Computer Vision (IJCV), pages 1-15, 2024. 3 +[113] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. In International Conference on Learning Representations (ICLR), 2025. 1, 2, 3, 5, 6, 7, 8 +[114] Jason Y Zhang, Amy Lin, Moneish Kumar, Tzu-Hsuan Yang, Deva Ramanan, and Shubham Tulsiani. Cameras as rays: Pose estimation via ray diffusion. In International Conference on Learning Representations (ICLR), 2024. 5 +[115] Qihang Zhang, Shuangfei Zhai, Miguel Angel Bautista, Kevin Miao, Alexander Toshev, Joshua Susskind, and Jiatao Gu. World-consistent video diffusion with explicit 3d + +modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3 +[116] Zhoutong Zhang, Forrester Cole, Zhengqi Li, Michael Rubinstein, Noah Snavely, and William T. Freeman. Structure and motion from casual videos. In European Conference on Computer Vision (ECCV), 2022. 6, 7 +[117] Wenliang Zhao, Yongming Rao, Zuyan Liu, Benlin Liu, Jie Zhou, and Jiwen Lu. Unleashing text-to-image diffusion models for visual perception. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5729-5739, 2023. 3 +[118] Chuanxia Zheng and Andrea Vedaldi. Free3d: Consistent novel view synthesis without 3d representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9720-9731, 2024. 3, 4 +[119] Yang Zheng, Adam W Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J Guibas. Pointodyssey: A large-scale synthetic dataset for long-term point tracking. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 19855-19865, 2023. 6, 15 + +# Geo4D: Leveraging Video Generators for Geometric 4D Scene Reconstruction Supplementary Material + +In this supplementary material, we provide additional information to supplement our main submission. The code is available here for research purposes: github.com/ jzr99/Geo4D + +# 6. Implementation Details + +# 6.1. Training Dataset + +As shown in Tab. 5, we use five synthetic datasets for training: Spring [50], BEDLAM [2], PointOdyssey [119], TarTanAir [93], and VirtualKitti [6]. Although all datasets are synthetic, we found that some depth pixels are missing in PointOdyssey [119]. To address this, we apply max pooling to inpaint the missing pixels. During training, we sample each dataset according to the ratios in Tab. 5. For each sample, we select 16 frames from the sequence, with the sampling stride randomly chosen from $\{1,2,3\}$ to allow our diffusion model to adapt to input videos with various frame rates. + +# 6.2. Optimization Details + +The overall optimization process is outlined in Algorithm 1. We first predict all three modality maps using our diffusion model for each video clip $g$ . The predicted point maps are then roughly aligned based on the overlapping frames using the Umayama algorithm [84]. The camera intrinsic $\mathbf{K}^k$ is initialized by minimizing the projection error of the point map $X^{k,g^k}$ in its reference (first) frame $k$ within each window group $g^k$ . The camera extrinsics are then initialized using the RANSAC PnP algorithm. In the first stage of optimization, the point maps are roughly disentangled into camera pose and depth map. The disparity map is then aligned with the global depth inferred from point maps by solving Eq. (5) from the main paper to obtain the scale and shift parameters. 
The camera parameters extracted from the predicted ray map are aligned with the global camera trajectory based on the reference (first) frame of each video clip $g$ via Eq. (8) from the main paper. After initializing all the alignment learnable parameters, including rotation $\mathbf{R}_{*}^{g}$ , scale $\lambda_{*}^{g}$ , and shift $\beta_{*}^{g}$ across different modalities, where $* \in \{\mathrm{p},\mathrm{d},\mathrm{c}\}$ , we jointly optimize all the learnable parameters by Eq. (10). Specifically, we set the weights for each loss term in Eq. (10) as $\alpha_{1} = 1, \alpha_{2} = 2, \alpha_{3} = 0.005, \alpha_{4} = 0.015$ to roughly equalize the scale of the different losses. + +Algorithm 1 Multi-Modal Alignment Optimization +1: $X^{i,g}, D^{i,g}, r^{i,g} \gets$ Predicted by our diffusion model +2: $D_{\mathrm{p}}^{i}, \lambda_{\mathrm{p}}^{g}, R_{\mathrm{p}}^{g}, \beta_{\mathrm{p}}^{g} \gets$ Initialized by Umayama algorithm +3: $K_{\mathrm{p}}^{k} \gets$ Optimized from $X^{k,g^k}$ +4: $R_{\mathrm{p}}^{i}, o_{\mathrm{p}}^{i} \gets$ Initialized by Ransac PnP from pointmaps $X^i$ +5: $R_{\mathrm{c}}^{i,g}, o_{\mathrm{c}}^{i,g} \gets$ Initialized by Eqs. (6) and (7) from raymaps $r^{i,g}$ +6: repeat +7: if Iteration = Align start iteration then +8: $\lambda_{\mathrm{d}}^{g}, \beta_{\mathrm{d}}^{g} \gets \arg \min \mathcal{L}_{\mathrm{d}}$ (Eq. (5)) +9: $R_{\mathrm{c}}^{g}, \lambda_{\mathrm{c}}^{g}, \beta_{\mathrm{c}}^{g} \gets \arg \min \mathcal{L}_{\mathrm{c}}$ (Eq. 
(8)) +10: else if Iteration < Align start iteration then +11: $D_{\mathrm{p}}^{i}, K_{\mathrm{p}}^{i}, R_{\mathrm{p}}^{i}, o_{\mathrm{p}}^{i}, \lambda_{\mathrm{p}}^{g}, R_{\mathrm{p}}^{g}, \beta_{\mathrm{p}}^{g}, \gets \arg \min \mathcal{L}_{\mathrm{p}} + \mathcal{L}_{\mathrm{s}}$ +12: else +13: $D_{\mathrm{p}}^{i}, K_{\mathrm{p}}^{i}, R_{\mathrm{p}}^{i}, o_{\mathrm{p}}^{i}, \lambda_{*}^{g}, R_{*}^{g}, \beta_{*}^{g} \gets \arg \min \mathcal{L}_{\mathrm{all}}$ +14: end if +15: until max loop reached + +
DatasetScene type#Frames#SequencesRatio
PointOdyssey [119]Indoors/Outdoors200K13116.7%
TartanAir [93]Indoors/Outdoors1000K16316.7%
Spring [50]Outdoors6K3716.7%
VirtualKITTI [6]Driving43K32016.7%
BEDLAM [2]Indoors/Outdoors380K10K33.3%
+ +Table 5. Details of training datasets. Our method only uses synthetic datasets for training. + +
StepsVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
10.22170.70.2340.0720.753
50.20573.50.1850.0630.547
100.20773.20.2120.0710.508
250.22072.20.2110.0740.564
+ +Table 6. Ablation study for the DDIM sampling steps. on the Sintel [5] dataset. + +# 7. Additional Analysis + +# 7.1. Ablating the Number of Denoising Steps + +We study the influence of the number of denoising steps during inference. As shown in Tab. 6, the model achieves optimal performance after around 5 steps. Compared to the video generation task, where a larger number of denoising steps usually produces a more detailed generated video, 4D reconstruction is a more deterministic task, which requires fewer steps. Similar phenomena are also observed in [22], which uses a video generator for video depth estimation. + +![](images/235dd588867b0f447d916c89bdee70030354dd2db6dddad26a1dda0b7431f433.jpg) +Figure 5. Additional qualitative results. Our method generalizes well to various scenes with different 4D objects and performs robustly against different camera and object motions. + +
MethodVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
w/o fine-tuned0.21272.10.1920.0610.577
w fine-tuned0.20573.50.1850.0630.547
+ +Table 7. Ablation study for the fine-tuned point map VAE on the Sintel [5] dataset. The fine-tuned point map VAE performs better than the original one. + +# 7.2. Ablation Study for Fine-Tuned Point Map VAE + +As stated in the main paper, we added an additional branch to predict the uncertainty for our point map VAE and fine-tuned it based on Eq. 3. We perform an ablation study on our fine-tuning strategy. As shown in Tab. 7, our fine-tuned point map VAE achieves consistently better performance on both video depth estimation and camera pose estimation tasks compared with the original pre-trained image VAE, + +![](images/8c9b162c93eb332a31fde267e9a0e93ffc3f40492cd2dbd264eb848306b45339.jpg) +Figure 6. Visualization of different geometric modality maps. + +demonstrating the necessity and effectiveness of our finetuning strategy. + +# 7.3. Analysis of Multi-Modal Representation + +Point maps (PMs) and disparity maps (DMs) are complementary. DMs better represent near objects, while PMs are more depth-agnostic (e.g., human vs house in Fig. 6 (b,c)). As in prior work, DMs are affine invariant (which here makes them range-compatible with the pretrained RGB VAE); their scale and shift, needed to recover undistorted geometry, are inferred by matching them to the predicted PMs. Ray maps (RMs) help infer the camera pose when PMs fail to represent points at infinity (such as the sky in Fig. 6 (e)). We observed that PMs tend to be noisier than DMs, so we prioritized modeling the PMs' uncertainty. Per-pixel uncertainty for ray maps are less meaningful given the high degree of correlation between individual rays. During multi-modal alignment, we align global point clouds with DMs in disparity space and with PMs in linear space. This naturally gives more weight to near points, which tend to be estimated well by DMs, and weighs points based on uncertainty with PMs, thus taking advantage of both modalities. + +# 8. 
Visualization + +Figure 5 shows additional visualizations for indoor, outdoor, and driving scenes. Although our model is only trained on synthetic datasets, it generalizes to real-world data with diverse objects and motions. + +# 9. Limitations + +Although our method performs well and generalizes to a wide range of in-the-wild videos, it can struggle in cases involving significant changes in focal length or extreme camera motion throughout a sequence. This limitation likely stems from the lack of focal length variation in our training data. Incorporating more sequences with diverse camera movements and zooming effects could help mitigate this issue. Moreover, due to the inherent temporal attention mechanism in our network architecture, our approach currently supports only monocular video input. Extending the method to handle multi-view images or videos is a promising direction for future work. \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07961/images/0e871412a73fb1a4e0d250027f7de496bdefed521e06be3fdba1bc8a057c63dd.jpg b/data/2025/2504_07xxx/2504.07961/images/0e871412a73fb1a4e0d250027f7de496bdefed521e06be3fdba1bc8a057c63dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..438aef998c1c425330b1f5b87c7f0feb30139444 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/0e871412a73fb1a4e0d250027f7de496bdefed521e06be3fdba1bc8a057c63dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b59d4661edd775d07fc4b3e221d53d4493aac2dccbb721c0b076e6a382852031 +size 115881 diff --git a/data/2025/2504_07xxx/2504.07961/images/105a405ac4ff6c789f6798e930d20e77df96678809918788f2d6647f5316cbf7.jpg b/data/2025/2504_07xxx/2504.07961/images/105a405ac4ff6c789f6798e930d20e77df96678809918788f2d6647f5316cbf7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87e699013ba74392ff9ecf9c8d7955aca442ff13 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07961/images/105a405ac4ff6c789f6798e930d20e77df96678809918788f2d6647f5316cbf7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3fb61c3a70f56c9d56bac78c0a323b8d4d1d6ba7eeb1c49a03b25c1ec14b406 +size 8357 diff --git a/data/2025/2504_07xxx/2504.07961/images/161ac3a2e22555c8828abbf3af34a702e6e0dbcb916f70948abb49701c679c34.jpg b/data/2025/2504_07xxx/2504.07961/images/161ac3a2e22555c8828abbf3af34a702e6e0dbcb916f70948abb49701c679c34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e854c234477e44fc870d7965d4e78d49700c99bf --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/161ac3a2e22555c8828abbf3af34a702e6e0dbcb916f70948abb49701c679c34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3118035e35132bce6ff5584732917af5ed5e2bc25cebce717c207859935b7e56 +size 52808 diff --git a/data/2025/2504_07xxx/2504.07961/images/235dd588867b0f447d916c89bdee70030354dd2db6dddad26a1dda0b7431f433.jpg b/data/2025/2504_07xxx/2504.07961/images/235dd588867b0f447d916c89bdee70030354dd2db6dddad26a1dda0b7431f433.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45192bb28efcb0a86c70d6f0ec1135b349be2de7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/235dd588867b0f447d916c89bdee70030354dd2db6dddad26a1dda0b7431f433.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70abba108c01dde67150d4d9aa51667fafb67db5cd8e4045009a33d0a0b780a9 +size 200507 diff --git a/data/2025/2504_07xxx/2504.07961/images/31fe055d0b9532f244e4df7357114bae28839107d39783d32cc77d5fe458cccd.jpg b/data/2025/2504_07xxx/2504.07961/images/31fe055d0b9532f244e4df7357114bae28839107d39783d32cc77d5fe458cccd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..63ba5ad5f0a3a91f209eb20c16bf642764b0cf42 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/31fe055d0b9532f244e4df7357114bae28839107d39783d32cc77d5fe458cccd.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:deb0e506ba08fc25b5d8afedf1a18f2c7d79042e1cb29fc8b26f2c305ad4e459 +size 32547 diff --git a/data/2025/2504_07xxx/2504.07961/images/349163f4532c8b2177783f9f58045e52c7cd8ec68e668b9f16e1ca2f4a326abc.jpg b/data/2025/2504_07xxx/2504.07961/images/349163f4532c8b2177783f9f58045e52c7cd8ec68e668b9f16e1ca2f4a326abc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a6295634f976bd82e15bd189518408ca571f8ae --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/349163f4532c8b2177783f9f58045e52c7cd8ec68e668b9f16e1ca2f4a326abc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:881b15a51b633845f0cff4cb7534635cdcb875c2833f385ff61ba12450a7548b +size 7581 diff --git a/data/2025/2504_07xxx/2504.07961/images/399f0f33a67ecd0f9b30283d31a8486770d71fd31e3285f63b9dbb90ca1b6c31.jpg b/data/2025/2504_07xxx/2504.07961/images/399f0f33a67ecd0f9b30283d31a8486770d71fd31e3285f63b9dbb90ca1b6c31.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c64fa59888f472c52b5844732ec10a8016a08486 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/399f0f33a67ecd0f9b30283d31a8486770d71fd31e3285f63b9dbb90ca1b6c31.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b5d915cee86bc86e87938963f519de853961527c4f9b653bbbcd3a8ca5a9dd0 +size 4518 diff --git a/data/2025/2504_07xxx/2504.07961/images/3a46001fcd7f623e3cf8dd49722f9a3f098ee6bcf7a68c89fbf981cd929c42a0.jpg b/data/2025/2504_07xxx/2504.07961/images/3a46001fcd7f623e3cf8dd49722f9a3f098ee6bcf7a68c89fbf981cd929c42a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e5623969beafa3de8d7cc9baeefa0ce3614a181 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/3a46001fcd7f623e3cf8dd49722f9a3f098ee6bcf7a68c89fbf981cd929c42a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b195106aa569886a0f5adda773a5b682ccc29ce3fbce3576310b60d9348813f +size 28689 diff --git 
a/data/2025/2504_07xxx/2504.07961/images/5364a2a6461c500f21711140d4a24a474dee3729cfe59351e73e5d692522a123.jpg b/data/2025/2504_07xxx/2504.07961/images/5364a2a6461c500f21711140d4a24a474dee3729cfe59351e73e5d692522a123.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc3fa19d886c71cfa369f258625ac91628178605 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/5364a2a6461c500f21711140d4a24a474dee3729cfe59351e73e5d692522a123.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2650f181fc93e9ad74d6587e3f512398d29f0b3943341c132469e77a1ef0ec56 +size 21859 diff --git a/data/2025/2504_07xxx/2504.07961/images/59fd5169451397ccf3bd7880e7258023dc7f0fc38ae0a0fd7f3567726904a1cf.jpg b/data/2025/2504_07xxx/2504.07961/images/59fd5169451397ccf3bd7880e7258023dc7f0fc38ae0a0fd7f3567726904a1cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9f4ec74c03b6b758b00c5c4f8a9ea15fd4698f5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/59fd5169451397ccf3bd7880e7258023dc7f0fc38ae0a0fd7f3567726904a1cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:576ae638960c1be861bffd69d01dbea0350c303498400e2d038e29c6e235ce44 +size 43956 diff --git a/data/2025/2504_07xxx/2504.07961/images/61da481cb2f4a6c3d3a45a38a155d0340b610c8b37b7fd2d27435e67430bc193.jpg b/data/2025/2504_07xxx/2504.07961/images/61da481cb2f4a6c3d3a45a38a155d0340b610c8b37b7fd2d27435e67430bc193.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c120e377861d57d3c4a42651c8e3b6dbc576c9db --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/61da481cb2f4a6c3d3a45a38a155d0340b610c8b37b7fd2d27435e67430bc193.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dcce2020335a493e72719b4303554790023e3da5e7970526e934ceebc61693c +size 5110 diff --git a/data/2025/2504_07xxx/2504.07961/images/665a34df6fe043aa68614ddaf1862d09d85da07e4a04e260e1736500a73a4d19.jpg 
b/data/2025/2504_07xxx/2504.07961/images/665a34df6fe043aa68614ddaf1862d09d85da07e4a04e260e1736500a73a4d19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23aef6e065dc44ea37e3595088a1035938d13a50 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/665a34df6fe043aa68614ddaf1862d09d85da07e4a04e260e1736500a73a4d19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:746f749692b148a39966a9ae7c9b9de84d03d1ed8e346a5f8d7c6225b0b77646 +size 12964 diff --git a/data/2025/2504_07xxx/2504.07961/images/6b6c4f3a7661332f8da800776b38b53b84bf480635decdc6034a431285270ef8.jpg b/data/2025/2504_07xxx/2504.07961/images/6b6c4f3a7661332f8da800776b38b53b84bf480635decdc6034a431285270ef8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7222fcabc9ffec02d60ef578034977d567f337da --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/6b6c4f3a7661332f8da800776b38b53b84bf480635decdc6034a431285270ef8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ffc6b1c3496926c70544c1230e19b36470fbc4f31f501ee7c01e1e15a6df3d2 +size 44094 diff --git a/data/2025/2504_07xxx/2504.07961/images/7038d9900452bdd46bac1ace91e6854e016dc9bd721e4b5891da5d7169423852.jpg b/data/2025/2504_07xxx/2504.07961/images/7038d9900452bdd46bac1ace91e6854e016dc9bd721e4b5891da5d7169423852.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45da300ecc0c3d662d052a1d48d2e97f639ee81a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/7038d9900452bdd46bac1ace91e6854e016dc9bd721e4b5891da5d7169423852.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58acdbf598f91fde475fc5747d5594334ef47fa87a0151db579da3766b5c88a5 +size 32759 diff --git a/data/2025/2504_07xxx/2504.07961/images/73b2bb4a75fa350981437555d26a2196695e4f249a9c9c3f725920b7e5eed31f.jpg b/data/2025/2504_07xxx/2504.07961/images/73b2bb4a75fa350981437555d26a2196695e4f249a9c9c3f725920b7e5eed31f.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a55555146e547956d7137afd94ae70bfe4e75831 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/73b2bb4a75fa350981437555d26a2196695e4f249a9c9c3f725920b7e5eed31f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae6986c1ad610b5dffd4c72db79f2f327082c9ceaf8a15615a3157c2159434c5 +size 48155 diff --git a/data/2025/2504_07xxx/2504.07961/images/82fafccbf5422e0e503459b4eec75c5222de00b4abd32f90963cc3f024704a27.jpg b/data/2025/2504_07xxx/2504.07961/images/82fafccbf5422e0e503459b4eec75c5222de00b4abd32f90963cc3f024704a27.jpg new file mode 100644 index 0000000000000000000000000000000000000000..486898a2641a73d0847172865c673b56710a5499 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/82fafccbf5422e0e503459b4eec75c5222de00b4abd32f90963cc3f024704a27.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7996bb0608eac96d45bc8ca8fc124381e143f0a5c7e6a9f285ba9df145c5a621 +size 10709 diff --git a/data/2025/2504_07xxx/2504.07961/images/865e847eb30e6f45bfa98558af28b890a5b797f29d27a850c9a4f209048b3886.jpg b/data/2025/2504_07xxx/2504.07961/images/865e847eb30e6f45bfa98558af28b890a5b797f29d27a850c9a4f209048b3886.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f092d9dcfd3ed456bc9317a36e4009cc2febca08 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/865e847eb30e6f45bfa98558af28b890a5b797f29d27a850c9a4f209048b3886.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58d7bb731fdf88d5a6a8bc5c673be988777834efc8012119e58374488c0c8f31 +size 25075 diff --git a/data/2025/2504_07xxx/2504.07961/images/8c9b162c93eb332a31fde267e9a0e93ffc3f40492cd2dbd264eb848306b45339.jpg b/data/2025/2504_07xxx/2504.07961/images/8c9b162c93eb332a31fde267e9a0e93ffc3f40492cd2dbd264eb848306b45339.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66b80d442e4207153111c9b56c9a2ef9cd648281 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07961/images/8c9b162c93eb332a31fde267e9a0e93ffc3f40492cd2dbd264eb848306b45339.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b5d7f1b047ff39855c9747842a668034c61cbe21da87e1a0a3a4dcae021702a +size 24122 diff --git a/data/2025/2504_07xxx/2504.07961/images/a3bd301507763e8287ae60b8ae2ce0a502a3f782186fa27e1fb10da1c54d0b3a.jpg b/data/2025/2504_07xxx/2504.07961/images/a3bd301507763e8287ae60b8ae2ce0a502a3f782186fa27e1fb10da1c54d0b3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe485ed1b89fe405fdc6feb4edd7f598300afae1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/a3bd301507763e8287ae60b8ae2ce0a502a3f782186fa27e1fb10da1c54d0b3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ae71385e7bc9a7dc54a32e1709bb99d35b0f05d53b31d7f3113ceae60266d9b +size 19739 diff --git a/data/2025/2504_07xxx/2504.07961/images/b89cb29cbc557e4a2704facd1add616f57970c058a17ada14246d706fded2bf7.jpg b/data/2025/2504_07xxx/2504.07961/images/b89cb29cbc557e4a2704facd1add616f57970c058a17ada14246d706fded2bf7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73a75289f5b446722a238c8ab888f5282fe89ca8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/b89cb29cbc557e4a2704facd1add616f57970c058a17ada14246d706fded2bf7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:729dc415f007489d914a0b49a1e534c034cd3f4e86263d83534792207671e07c +size 7188 diff --git a/data/2025/2504_07xxx/2504.07961/images/c076f88109dab7c9696980c607a9ee0e70c12cc68a7a1debc504756168c7c010.jpg b/data/2025/2504_07xxx/2504.07961/images/c076f88109dab7c9696980c607a9ee0e70c12cc68a7a1debc504756168c7c010.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83a5e8a9db8f5ae60bb58b4a20d8834c76132664 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/c076f88109dab7c9696980c607a9ee0e70c12cc68a7a1debc504756168c7c010.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:1d4740b2923ef827cb4c2624d0a4b32fccc54c770bf40c0ada6f8551a80b8235 +size 87142 diff --git a/data/2025/2504_07xxx/2504.07961/images/d4dfa354b18df642a9482367bea792839ff1939d45e9f0c86fd9d7e7655772fc.jpg b/data/2025/2504_07xxx/2504.07961/images/d4dfa354b18df642a9482367bea792839ff1939d45e9f0c86fd9d7e7655772fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0cd0fef59a7530bd9ff7a710ef4e54fccc35f0a3 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/d4dfa354b18df642a9482367bea792839ff1939d45e9f0c86fd9d7e7655772fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35ee86f76dea4458d31cc066c8cb223d7a5addf1878bede7cb0d8365e766efe2 +size 92252 diff --git a/data/2025/2504_07xxx/2504.07961/images/da603b61e692742b8f3125a2a34baeb6c0ea078414c151332d08fac574719ceb.jpg b/data/2025/2504_07xxx/2504.07961/images/da603b61e692742b8f3125a2a34baeb6c0ea078414c151332d08fac574719ceb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b55f2181dde8f7628df98bda60ad5c8ad1d26c92 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/da603b61e692742b8f3125a2a34baeb6c0ea078414c151332d08fac574719ceb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c3e4c770b74e1f750ea238de159a3c8b3eae5f6e8fe70eb7518b5cbf47a60cd +size 16722 diff --git a/data/2025/2504_07xxx/2504.07961/images/db53542f802407a82f43b8794fb4e1b0a6a2231ecb1e1cb025ac924bfe31dbb1.jpg b/data/2025/2504_07xxx/2504.07961/images/db53542f802407a82f43b8794fb4e1b0a6a2231ecb1e1cb025ac924bfe31dbb1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5eee900ae3214483579301f80b8eba5607958b6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/db53542f802407a82f43b8794fb4e1b0a6a2231ecb1e1cb025ac924bfe31dbb1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8370438c07d45b4e778dabecfdd40474b3fe6fb14cf81006b7e388c31645663 +size 9146 diff --git 
a/data/2025/2504_07xxx/2504.07961/images/e500ec47bdd68dbe13dcb0224c81e86421f92739745664a0c0fbbc0aeb1224c8.jpg b/data/2025/2504_07xxx/2504.07961/images/e500ec47bdd68dbe13dcb0224c81e86421f92739745664a0c0fbbc0aeb1224c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b95920bd767f9498deac367e1b82e6bc60e6ef2f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/e500ec47bdd68dbe13dcb0224c81e86421f92739745664a0c0fbbc0aeb1224c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e36fd4bd21c4eebd8bae38aeb5d86217e2c6998945d2280ffc44e9a8104daee8 +size 8617 diff --git a/data/2025/2504_07xxx/2504.07961/images/e7dbf373c3a0f15e7d346fa4ae0c146ce8c2a9035deb83b1ed5aed834adbe41a.jpg b/data/2025/2504_07xxx/2504.07961/images/e7dbf373c3a0f15e7d346fa4ae0c146ce8c2a9035deb83b1ed5aed834adbe41a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2fd30b0c4e6f2be07094cfe7e5d7fe61bf3a1ce2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/e7dbf373c3a0f15e7d346fa4ae0c146ce8c2a9035deb83b1ed5aed834adbe41a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0201f60392645f379608e4dfde35b06aa990cbf4ebcc9e92d3f33667ea614cb2 +size 7405 diff --git a/data/2025/2504_07xxx/2504.07961/images/f70a66213219ec9125bce35fe499bbcd826887e216833dca0c34aec4136a16f0.jpg b/data/2025/2504_07xxx/2504.07961/images/f70a66213219ec9125bce35fe499bbcd826887e216833dca0c34aec4136a16f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab82a969245d3f8eb8cb8e1b0c7811f170dd820f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/images/f70a66213219ec9125bce35fe499bbcd826887e216833dca0c34aec4136a16f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a6b3ac8143bf7465fd67fd69b47b91e76448a0c5c97119e5019efa4f6820561 +size 16445 diff --git a/data/2025/2504_07xxx/2504.07961/layout.json b/data/2025/2504_07xxx/2504.07961/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..7b401a98b4b0cca966a4c37267ae2c028375c762 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07961/layout.json @@ -0,0 +1,12966 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 64, + 103, + 545, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 103, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 64, + 103, + 545, + 120 + ], + "type": "text", + "content": "Geo4D: Leveraging Video Generators for Geometric 4D Scene Reconstruction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 102, + 142, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 142, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 102, + 142, + 506, + 172 + ], + "type": "text", + "content": "Zeren Jiang1 Chuanxia Zheng1 Iro Laina1 Diane Larlus2 Andrea Vedaldi1 \n1Visual Geometry Group, University of Oxford 2Naver Labs Europe" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 174, + 507, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 174, + 507, + 185 + ], + "spans": [ + { + "bbox": [ + 104, + 174, + 507, + 185 + ], + "type": "text", + "content": "{zeren, cxzheng, iro, vedaldi}@robots.ox.ac.uk diane.larlus@naverlabs.com" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 262, + 190, + 346, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 190, + 346, + 200 + ], + "spans": [ + { + "bbox": [ + 262, + 190, + 346, + 200 + ], + "type": "text", + "content": "geo4d.github.io" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 58, + 230, + 553, + 392 + ], + "blocks": [ + { + "bbox": [ + 58, + 230, + 553, + 392 + ], + "lines": [ + { + "bbox": [ + 58, + 230, + 553, + 392 + ], + "spans": [ + { + "bbox": [ + 58, + 230, + 553, + 392 + ], + "type": "image", + "image_path": "0e871412a73fb1a4e0d250027f7de496bdefed521e06be3fdba1bc8a057c63dd.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 55, + 393, + 555, + 427 + ], + "lines": [ + { + "bbox": [ + 55, + 393, + 555, + 427 + ], + "spans": [ + { + "bbox": [ + 55, + 393, + 555, + 427 + ], + "type": "text", + "content": "Figure 1. Geo4D repurposes a video diffusion model [102] for monocular 4D reconstruction. It uses only synthetic data for training, yet generalizes well to out-of-domain real videos. It predicts several geometric modalities, including point maps, disparity maps, and ray maps, fusing and aligning them to obtain state-of-the-art dynamic reconstruction even for scenes with extreme object and camera motion." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 151, + 437, + 200, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 437, + 200, + 449 + ], + "spans": [ + { + "bbox": [ + 151, + 437, + 200, + 449 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 463, + 295, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 463, + 295, + 629 + ], + "spans": [ + { + "bbox": [ + 54, + 463, + 295, + 629 + ], + "type": "text", + "content": "We introduce Geo4D, a method to repurpose video diffusion models for monocular 3D reconstruction of dynamic scenes. By leveraging the strong dynamic priors captured by large-scale pre-trained video models, Geo4D can be trained using only synthetic data while generalizing well to real data in a zero-shot manner. Geo4D predicts several complementary geometric modalities, namely point, disparity, and ray maps. We propose a new multi-modal alignment algorithm to align and fuse these modalities, as well as a sliding window approach at inference time, thus enabling robust and accurate 4D reconstruction of long videos. Extensive experiments across multiple benchmarks show that Geo4D significantly surpasses state-of-the-art video depth estimation methods." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 656, + 135, + 668 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 656, + 135, + 668 + ], + "spans": [ + { + "bbox": [ + 55, + 656, + 135, + 668 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 677, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 295, + 713 + ], + "type": "text", + "content": "We consider the problem of feed-forward 4D reconstruction, which involves learning a neural network to reconstruct the 3D geometry of a dynamic scene from a monoc" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 437, + 553, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 437, + 553, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 553, + 510 + ], + "type": "text", + "content": "ular video. This task is particularly challenging for videos captured in uncontrolled settings, such as those shot with handheld cameras or downloaded from the Internet. However, a robust solution to this problem would have a tremendous impact on a wide range of applications, from video understanding to computer graphics and robotics." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 510, + 554, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 554, + 629 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 554, + 629 + ], + "type": "text", + "content": "4D reconstruction from videos is related to multi-view static 3D reconstruction, which is typically addressed using methods from visual geometry like bundle adjustment. Recent neural networks [89, 92] have emerged as powerful tools that can replace, or at least complement, bundle adjustment. 
They excel especially in difficult reconstruction scenarios, involving, e.g., textureless surfaces and occlusions, thanks to the priors they learn from data. Given the additional challenges involved in 4D reconstruction, we expect that such priors would benefit this task even more." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 630, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 554, + 713 + ], + "type": "text", + "content": "In fact, powerful networks like DUSt3R [92], designed for static multi-view 3D reconstruction, have recently been extended to the dynamic case, for example by MonST3R [113]. However, these models are heavily engineered to solve specific 3D reconstruction problems. Most importantly, they require significant amounts of training data with 3D annotations for supervision. Such data" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 217, + 37, + 574 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 217, + 37, + 574 + ], + "spans": [ + { + "bbox": [ + 14, + 217, + 37, + 574 + ], + "type": "text", + "content": "arXiv:2504.07961v2 [cs.CV] 19 Aug 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "content": "is difficult to collect for dynamic scenes, especially in real life. 
This suggests using 4D synthetic training data instead. However, this data is difficult to obtain at scale, and the gap with the real world can compromise generalization." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 122, + 295, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 122, + 295, + 277 + ], + "spans": [ + { + "bbox": [ + 54, + 122, + 295, + 277 + ], + "type": "text", + "content": "One way to mitigate this problem is to pre-train the model on tasks related to 3D reconstruction for which real data is easily available. For example, DUSt3R [92] and derived methods [113] use image matching for pretraining [98]. Here, we suggest starting instead from an off-the-shelf video generator. Video generators are powerful models, often considered proxies of world simulators [37, 54, 59]. More importantly for us, the videos they generate demonstrate an understanding of effects like camera motion and perspective, as well as typical object motion in the context of a scene. However, they only generate pixels, leaving any 3D or 4D understanding implicit and thus not directly actionable." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 278, + 295, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 278, + 295, + 445 + ], + "spans": [ + { + "bbox": [ + 54, + 278, + 295, + 445 + ], + "type": "text", + "content": "In this work, we show that a pre-trained off-the-shelf video generator can be turned into an effective monocular feed-forward 4D reconstructor. To this end, we introduce Geo4D, a novel approach for adapting Video Generators for Geometric 4D Reconstruction. With Geo4D, we demonstrate that these generic video architectures can successfully solve complex 4D reconstruction tasks, which is a step towards future video foundation models that natively integrate 4D geometry. 
Prior work such as Marigold [28] and concurrent work DepthCrafter [22] have looked at adapting, respectively, image and video generators for depth estimation. Here, we go one step further and consider the full recovery of 4D geometry, including camera motion and dynamic 3D structure." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 447, + 295, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 447, + 295, + 567 + ], + "spans": [ + { + "bbox": [ + 54, + 447, + 295, + 567 + ], + "type": "text", + "content": "With Geo4D, our goal is to make 4D geometry explicit in the video generator. This in turn requires us to choose an explicit representation of 4D information. We follow DUSt3R and adopt its viewpoint-invariant point maps. Namely, we associate each pixel in each frame with the coordinate of the corresponding 3D point, expressed relative to the first frame in the video, used as a reference. Hence, the static parts of the point clouds extracted from the different frames line up, and the dynamic parts form a 3D 'trace' of the motion of the dynamic objects, as shown in Fig. 1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 569, + 295, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 569, + 295, + 687 + ], + "spans": [ + { + "bbox": [ + 55, + 569, + 295, + 687 + ], + "type": "text", + "content": "Viewpoint-invariant point maps are a powerful representation because they implicitly encode the camera motion and intrinsics and can be easily predicted by a neural network [92]. However, they are not necessarily the best representation for all parts of the scene, particularly for points far away from the observer or even at infinity, such as the sky. We thus consider two more modalities with better dynamic range, namely disparity maps and camera ray maps. Ray maps, in particular, are defined for all image pixels regardless of the scene geometry." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": "Our model thus predicts three modalities: point, disparity, and ray maps. These modalities are redundant in prin" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 553, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 144 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 144 + ], + "type": "text", + "content": "ciple, but complementary in practice. At test time, we reconcile them via a fast, global optimization step and show that this leads to significantly more robust 4D reconstructions. Due to depth and ray map prediction, we show very strong empirical results on video depth estimation and in the recovery of the camera orientation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 144, + 553, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 144, + 553, + 228 + ], + "spans": [ + { + "bbox": [ + 313, + 144, + 553, + 228 + ], + "type": "text", + "content": "One of the challenges of monocular 4D reconstruction is that it is ambiguous, significantly more so than static 3D reconstruction. However, the stochastic nature of the video generator can help deal with this ambiguity. We also introduce uncertainty maps in the encoder-decoder architecture that processes the geometric maps, and integrate them into the multi-modal alignment process." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 229, + 553, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 229, + 553, + 361 + ], + "spans": [ + { + "bbox": [ + 313, + 229, + 553, + 361 + ], + "type": "text", + "content": "Overall, our contributions are as follows. 
(i) We introduce Geo4D, a 4D feed-forward network for dynamic scene reconstruction that builds on top of an off-the-shelf video generator. (ii) We suggest generating multiple partially redundant geometric modalities and fusing them at test time via lightweight optimization. (iii) We show the benefits of this multi-modal fusion in terms of improved 4D prediction accuracy. Experiments show that this model can reconstruct even highly dynamic scenes (such as the drifting scene in DAVIS [23] presented in Fig. 1) and outperforms current video depth and camera rotation estimation methods." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 373, + 400, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 373, + 400, + 385 + ], + "spans": [ + { + "bbox": [ + 313, + 373, + 400, + 385 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 393, + 481, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 393, + 481, + 406 + ], + "spans": [ + { + "bbox": [ + 313, + 393, + 481, + 406 + ], + "type": "text", + "content": "2.1. Dynamic Scene Reconstruction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 411, + 553, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 411, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 313, + 411, + 553, + 567 + ], + "type": "text", + "content": "Static 3D reconstruction. Feed-forward 3D reconstruction has achieved remarkable success across various representations, including voxels [11, 74, 83], meshes [18, 72, 90], and point clouds [41, 110]. These advancements have been further driven by implicit neural representations [52, 56, 60, 75] and the emergence of 3D Gaussian Splatting (3D-GS) [7, 9, 29, 76, 79, 80]. Recently, DUS3R [92] introduced a point map representation for scene-level 3D reconstruction, followed by [35, 86, 89, 104]. 
However, these models predominantly focus on static 3D reconstruction. Our approach also uses point maps as a representation but extends them to handle dynamic scenes, which present additional challenges due to object motion over time." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 570, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 553, + 713 + ], + "type": "text", + "content": "Iterative 4D reconstruction. Iterative or optimization-based approaches reconstruct 4D models from monocular videos by iteratively fitting the observed data. Classical techniques often rely on RGB-D sensors [24, 53], but such steps are impractical for many real-world scenes. Recently, with advancements in neural representations [52, 56], NeRF-based approaches [27, 38, 39, 57, 58, 62] have shown impressive results. However, volume rendering in NeRF is computationally expensive. Convergence and rendering speed can be improved by using 3D-GS representations [12, 29, 34, 43, 91, 99, 107, 111], which reduce but do not eliminate the cost of iterative optimization. 
Very" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 72, + 553, + 225 + ], + "blocks": [ + { + "bbox": [ + 60, + 72, + 553, + 225 + ], + "lines": [ + { + "bbox": [ + 60, + 72, + 553, + 225 + ], + "spans": [ + { + "bbox": [ + 60, + 72, + 553, + 225 + ], + "type": "image", + "image_path": "d4dfa354b18df642a9482367bea792839ff1939d45e9f0c86fd9d7e7655772fc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 226, + 555, + 270 + ], + "lines": [ + { + "bbox": [ + 55, + 226, + 555, + 270 + ], + "spans": [ + { + "bbox": [ + 55, + 226, + 555, + 270 + ], + "type": "text", + "content": "Figure 2. Overview of Geo4D. During training, video conditions are injected by locally concatenating the latent feature of the video with diffused geometric features " + }, + { + "bbox": [ + 55, + 226, + 555, + 270 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t^{\\mathrm{X}},\\mathbf{z}_t^{\\mathrm{D}},\\mathbf{z}_t^{\\mathrm{r}}" + }, + { + "bbox": [ + 55, + 226, + 555, + 270 + ], + "type": "text", + "content": " and are injected globally via cross-attention in the denoising U-Net, after CLIP encoding and a query transformer. The U-Net is fine-tuned via Eq. 2. 
During inference, iteratively denoised latent features " + }, + { + "bbox": [ + 55, + 226, + 555, + 270 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{z}}_0^{\\mathrm{X}},\\hat{\\mathbf{z}}_0^{\\mathrm{D}},\\hat{\\mathbf{z}}_0^{\\mathrm{r}}" + }, + { + "bbox": [ + 55, + 226, + 555, + 270 + ], + "type": "text", + "content": " are decoded by the fine-tuned VAE decoder, followed by multi-modal alignment optimization for coherent 4D reconstruction." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 282, + 294, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 282, + 294, + 390 + ], + "spans": [ + { + "bbox": [ + 54, + 282, + 294, + 390 + ], + "type": "text", + "content": "recently, MegaSaM [40] achieved highly accurate and robust camera pose estimation and reconstruction for dynamic videos, but it requires accurate monocular depth priors. Similarly, Uni4D [108] produces accurate 4D reconstructions by leveraging various visual foundation models and performing multi-stage bundle adjustment. In contrast, our approach is a diffusion-driven feed-forward framework, which eliminates the need for per-video bundle adjustment and external depth estimation models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 395, + 296, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 395, + 296, + 550 + ], + "spans": [ + { + "bbox": [ + 55, + 395, + 296, + 550 + ], + "type": "text", + "content": "Feed-forward 4D reconstruction. Similar to our approach, recent works have started to explore feed-forward 4D reconstruction for dynamic scenes: a monocular video with dynamic objects is processed by a neural network to recover a 4D representation. For objects, L4GM [66] andAnimate3D [26] first generate multi-view videos from a monocular video input, and subsequently apply 3D-GS [29] to reconstruct a temporally consistent 4D model. 
For scenes, a notable example is MonST3R [113], which adapts the static scene reconstruction of DUSt3R [92] to handle dynamic scenes. Very recently, Easi3R [8] applies attention adaptation during inference and performs 4D reconstruction based on DUSt3R [92] in an efficient, training-free manner." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 563, + 211, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 563, + 211, + 574 + ], + "spans": [ + { + "bbox": [ + 55, + 563, + 211, + 574 + ], + "type": "text", + "content": "2.2. Geometric Diffusion Models" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 582, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 296, + 714 + ], + "type": "text", + "content": "Our method builds upon advancements in video diffusion models [3, 4, 16, 19, 21, 31, 73, 88, 94, 102, 112], which generate temporally consistent videos from text or image prompts. Recent studies have explored the rich 3D priors embedded within large-scale pre-trained diffusion models, employing either knowledge distillation [25, 42, 51, 61, 87, 96] or fine-tuning [20, 36, 45-47, 71, 85, 118] for 3D reconstruction and generation. While these methods have significantly advanced single-object 3D reconstruction from sparse inputs, they remain largely constrained to static, isolated objects centered within an image. 
Beyond single" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 282, + 555, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 282, + 555, + 414 + ], + "spans": [ + { + "bbox": [ + 313, + 282, + 555, + 414 + ], + "type": "text", + "content": "object reconstruction, several recent efforts have extended pre-trained diffusion models to tackle scene-level 3D tasks, such as optical flow estimation [69], view synthesis [10, 15, 44, 68, 81, 109], depth estimation [13, 28, 117], and normal estimation [14, 33, 63]. More related to our approach, Matrix3D [49] jointly predicts depth and camera parameters, and WVD [115] introduces a hybrid RGB+point map representation for scene reconstruction. However, these approaches assume static 3D environments, whereas we address dynamic 4D scene reconstruction, which is a much harder problem due to object motion across time." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 415, + 556, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 415, + 556, + 595 + ], + "spans": [ + { + "bbox": [ + 313, + 415, + 556, + 595 + ], + "type": "text", + "content": "More closely related to our approach, concurrent GeometryCrafter [103] introduced a point map VAE with a dual encoder-decoder architecture to improve reconstruction accuracy. However, their point maps are defined in individual camera coordinates, necessitating the use of additional segmentation [30] and tracking models [101] to recover the global point map and estimate camera poses. Aether [82], on the other hand, outputs depth maps and ray maps from a video diffusion model for 4D reconstruction. In contrast, our experiments demonstrate that performance can be significantly enhanced by jointly predicting multiple geometric modalities that capture diverse dynamic ranges, ensuring better temporal coherence and robustness. 
Importantly, our approach is self-contained and does not rely on external models, enhancing its generality and reliability." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 609, + 370, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 609, + 370, + 620 + ], + "spans": [ + { + "bbox": [ + 314, + 609, + 370, + 620 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "text", + "content": "Our goal is to learn a neural network " + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "text", + "content": " that can reconstruct dynamic 3D scenes from monocular videos. Given as input a monocular video " + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "inline_equation", + "content": "\\mathcal{I} = \\{I^i\\}_{i=1}^N" + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "text", + "content": " consisting of " + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "text", + "content": " frames, where each frame is an RGB image " + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "inline_equation", + "content": "I^i \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "text", + "content": ", the network " + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 313, + 630, + 555, + 690 + ], + "type": "text", + "content": " returns a representation of its 4D geometry:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": 
[ + 359, + 700, + 555, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 700, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 359, + 700, + 555, + 715 + ], + "type": "interline_equation", + "content": "f _ {\\boldsymbol {\\theta}}: \\left\\{\\boldsymbol {I} ^ {i} \\right\\} _ {i = 1} ^ {N} \\mapsto \\left\\{\\left(\\boldsymbol {D} ^ {i}, \\boldsymbol {X} ^ {i}, \\boldsymbol {r} ^ {i}\\right) \\right\\} _ {i = 1} ^ {N}. \\tag {1}", + "image_path": "61da481cb2f4a6c3d3a45a38a155d0340b610c8b37b7fd2d27435e67430bc193.jpg" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "text", + "content": "The network computes the disparity map " + }, + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "inline_equation", + "content": "D^{i}\\in \\mathbb{R}^{H\\times W\\times 1}" + }, + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "text", + "content": " the viewpoint-invariant point map " + }, + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "inline_equation", + "content": "X^{i}\\in \\mathbb{R}^{H\\times W\\times 3}" + }, + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "text", + "content": ", and the ray map " + }, + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "inline_equation", + "content": "\\pmb {r}^i\\in \\mathbb{R}^{H\\times W\\times 6}" + }, + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "text", + "content": " for 
each frame " + }, + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "inline_equation", + "content": "I^i" + }, + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "inline_equation", + "content": "i = 1,\\dots ,N" + }, + { + "bbox": [ + 55, + 71, + 294, + 167 + ], + "type": "text", + "content": ". As we discuss in Sec. 3.2, these quantities collectively represent the 4D geometry of a scene, including its dynamic structure and time-varying camera extrinsic and intrinsic parameters. No camera parameters are provided as input; these are implicitly estimated by the model as well." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "type": "text", + "content": "We implement " + }, + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "type": "text", + "content": " as a video diffusion model, where " + }, + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 55, + 167, + 294, + 251 + ], + "type": "text", + "content": " are the learnable parameters. We discuss the relevant background on video diffusion models in Sec. 3.1. Then, in Sec. 3.2, we describe how we extend the model to predict the three modalities of the 4D geometry. Finally, in Sec. 3.3, we describe how we fuse and align these modalities to obtain a coherent 4D reconstruction at test time." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 258, + 254, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 258, + 254, + 270 + ], + "spans": [ + { + "bbox": [ + 55, + 258, + 254, + 270 + ], + "type": "text", + "content": "3.1. 
Preliminaries: Video Diffusion Model" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "spans": [ + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "content": "Our key insight is that by building on pre-trained video diffusion models, our approach can exploit the strong motion and scene geometry priors inherently encoded within these models. Specifically, we build Geo4D on top of DynamiCrafter [102], a \"foundation\" video diffusion model. DynamiCrafter is a latent diffusion model [67]: it uses a variational autoencoder (VAE) to obtain a more compact video representation and thus reduce computational complexity. During training, a target sequence " + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = \\pmb{x}^{1:N}" + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "content": " is first encoded into the latent space using the encoder " + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "inline_equation", + "content": "z_0^{1:N} = \\mathcal{E}(\\pmb{x}^{1:N})" + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "content": ", and then perturbed by " + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "inline_equation", + "content": "\\pmb{z}_t^{1:N} = \\sqrt{\\bar{\\alpha}_t}\\pmb{z}_0^{1:N} + \\sqrt{1 - \\bar{\\alpha}_t}\\epsilon^{1:N}" + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "content": " is Gaussian noise, and " + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "inline_equation", + "content": "\\bar{\\alpha}_t" + }, + { + 
"bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "content": " is the noise level at step " + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "content": " noisings steps. The denoising network " + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 55, + 276, + 295, + 456 + ], + "type": "text", + "content": " is then trained to reverse this noisng process by optimizing the following objective:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 63, + 463, + 294, + 494 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 463, + 294, + 494 + ], + "spans": [ + { + "bbox": [ + 63, + 463, + 294, + 494 + ], + "type": "interline_equation", + "content": "\\min _ {\\boldsymbol {\\theta}} \\mathbb {E} _ {(\\boldsymbol {x} ^ {1: N}, y), t, \\epsilon^ {1: N} \\sim \\mathcal {N} (\\boldsymbol {0}, \\boldsymbol {I})} \\left\\| \\epsilon^ {1: N} - \\epsilon_ {\\boldsymbol {\\theta}} \\left(\\boldsymbol {z} _ {t} ^ {1: N}, t, y\\right) \\right\\| _ {2} ^ {2}, \\tag {2}", + "image_path": "e7dbf373c3a0f15e7d346fa4ae0c146ce8c2a9035deb83b1ed5aed834adbe41a.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "spans": [ + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "text", + "content": " is the conditional input. 
Once trained, the model generates a video prompted by " + }, + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "text", + "content": " via iteratively denoising from pure noise " + }, + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_T^{1:N}" + }, + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "text", + "content": ", and then decoding the denoised latent with a decoder " + }, + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{X}} = \\mathcal{D}(\\hat{\\mathbf{z}}_0^{1:N})" + }, + { + "bbox": [ + 55, + 494, + 295, + 544 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 550, + 251, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 550, + 251, + 562 + ], + "spans": [ + { + "bbox": [ + 55, + 550, + 251, + 562 + ], + "type": "text", + "content": "3.2. Multi-modal Geometric 4D Diffusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 567, + 295, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 567, + 295, + 603 + ], + "spans": [ + { + "bbox": [ + 55, + 567, + 295, + 603 + ], + "type": "text", + "content": "We first provide a more precise description of the 4D multimodal representation output by our model, and then explain how it is encoded in the latent space for generation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": "Multi-modal geometric representations. 
The dynamic 3D structure of a scene is represented by a sequence of point maps " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\{\\pmb{X}^i\\}_{i=1}^N" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": ", one for each of its " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": " frames. Let " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "(u, v)" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": " denote the pixel coordinates in the image plane. Then, the value " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "X_{uv}^i \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": " is the 3D coordinate of the scene point that lands at pixel " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "(u, v)" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": " in frame " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "I^i" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": ", expressed in the reference frame of camera " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "i = 1" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": ". Because the reference frame is fixed and independent of the time-varying viewpoint, we call these point maps viewpoint-invariant. 
The" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": "advantages of this representation are convincingly demonstrated by DUSt3R [92]. For a static scene, or by knowing which image pixels correspond to the static part of a scene, knowledge of the point maps allows recovery of the intrinsic and extrinsic camera parameters as well as the scene depth. This is done by solving an optimization problem that aligns the dynamic point maps with a pinhole camera model." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "spans": [ + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": "As noted in Sec. 1, while point maps " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "\\{\\pmb{X}^i\\}_{i=1}^N" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": " fully encode the 4D geometry of the scene, they are not effective for all parts of the scene. Their dynamic range is limited, and they are not even defined for points at infinity (e.g. sky). 
Hence, we consider two additional modalities: disparity maps " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "\\{\\pmb{D}^i\\}_{i=1}^N" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": " and camera ray maps " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "\\{\\pmb{r}^i\\}_{i=1}^N" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": ", also encouraged by prior evidence [14, 33, 49] that diffusion models can benefit from learning to predict multiple quantities. Disparity maps are not viewpoint-invariant, but have a better dynamic range than point maps (the disparity is zero for points at infinity). Ray maps represent only the camera parameters and are defined for all image pixels, independent of the scene geometry. For the disparity map, " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "D_{uv}^i" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": " is the disparity (inverse depth) of the scene point that lands at pixel " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "(u,v)" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": ", as seen in frame " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "I^i" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": ". 
For the ray map, we adopt Plücker coordinates [75, 97, 118], i.e., " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "\\pmb{r}_{uv} = (\\pmb{d}_{uv}, \\pmb{m}_{uv})" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "\\pmb{d}_{uv} = \\mathbf{R}^\\top \\mathbf{K}^{-1}(u,v,1)^\\top" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": " is the ray direction, and " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "\\pmb{m}_{uv} = -\\mathbf{R}^\\top \\mathbf{t} \\times \\pmb{d}_{uv}" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "inline_equation", + "content": "(\\mathbf{R}, \\mathbf{K}, \\mathbf{t})" + }, + { + "bbox": [ + 313, + 156, + 555, + 384 + ], + "type": "text", + "content": " are the camera's rotation, calibration, and translation parameters." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 386, + 555, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 386, + 555, + 518 + ], + "spans": [ + { + "bbox": [ + 313, + 386, + 555, + 518 + ], + "type": "text", + "content": "Multi-modal latent encoding. The three modalities come in the form of images and can thus be naturally predicted by the video diffusion architecture. 
However, this requires first mapping them to the latent space, for which we need suitable versions of the encoder " + }, + { + "bbox": [ + 313, + 386, + 555, + 518 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 313, + 386, + 555, + 518 + ], + "type": "text", + "content": " and decoder " + }, + { + "bbox": [ + 313, + 386, + 555, + 518 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 313, + 386, + 555, + 518 + ], + "type": "text", + "content": " from Sec. 3.1. Related prior work [14, 28] for depth prediction simply repurposes a pre-trained image encoder-decoder without modification. We found this to work well for disparity and ray maps, but not for point maps. Hence, for the point maps only, we fine-tune the pre-trained decoder " + }, + { + "bbox": [ + 313, + 386, + 555, + 518 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 313, + 386, + 555, + 518 + ], + "type": "text", + "content": " using the following objective function [100]:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 321, + 527, + 553, + 566 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 527, + 553, + 566 + ], + "spans": [ + { + "bbox": [ + 321, + 527, + 553, + 566 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = - \\sum_ {u v} \\ln \\frac {1}{\\sqrt {2} \\sigma_ {u v}} \\exp - \\frac {\\sqrt {2} \\ell_ {1} (\\mathcal {D} (\\mathcal {E} (\\boldsymbol {X})) _ {u v} , \\boldsymbol {X} _ {u v})}{\\sigma_ {u v}}, \\tag {3}", + "image_path": "db53542f802407a82f43b8794fb4e1b0a6a2231ecb1e1cb025ac924bfe31dbb1.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 567, + 553, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 567, + 553, + 639 + ], + "spans": [ + { + "bbox": [ + 313, + 567, + 553, + 639 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 567, + 553, + 639 + ], 
+ "type": "inline_equation", + "content": "\\sigma \\in \\mathbb{R}^{H\\times W}" + }, + { + "bbox": [ + 313, + 567, + 553, + 639 + ], + "type": "text", + "content": " is the uncertainty of the reconstructed point map, which is also predicted by an additional branch of our VAE decoder. We leave the encoder " + }, + { + "bbox": [ + 313, + 567, + 553, + 639 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 313, + 567, + 553, + 639 + ], + "type": "text", + "content": " unchanged to modify the latent space as little as possible; instead, we normalize the point maps to the range " + }, + { + "bbox": [ + 313, + 567, + 553, + 639 + ], + "type": "inline_equation", + "content": "[-1,1]" + }, + { + "bbox": [ + 313, + 567, + 553, + 639 + ], + "type": "text", + "content": " to make them more compatible with the pre-trained image encoder." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 641, + 553, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 553, + 689 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 553, + 689 + ], + "type": "text", + "content": "Video conditioning. The original video diffusion model is conditioned on a single image, but here we need to condition it on the entire input video " + }, + { + "bbox": [ + 313, + 641, + 553, + 689 + ], + "type": "inline_equation", + "content": "\\mathcal{I} = \\{I^i\\}_{i=1}^N" + }, + { + "bbox": [ + 313, + 641, + 553, + 689 + ], + "type": "text", + "content": ". To this end, we use a hybrid conditioning mechanism with two streams." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 689, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 553, + 714 + ], + "type": "text", + "content": "As shown in Fig. 
2, in one stream, we extract a global representation of each frame " + }, + { + "bbox": [ + 313, + 689, + 553, + 714 + ], + "type": "inline_equation", + "content": "\\pmb{I}^i" + }, + { + "bbox": [ + 313, + 689, + 553, + 714 + ], + "type": "text", + "content": " by passing it to" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 157 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 157 + ], + "type": "text", + "content": "CLIP [64] followed by a lightweight learnable query transformer [1]. These vectors are incorporated in the transformer via cross-attention layers injected in each U-Net block. In the other stream, we extract local spatial features from the VAE encoder and concatenate them channel-wise to the noised latents, encoding the generated 4D modalities " + }, + { + "bbox": [ + 55, + 72, + 296, + 157 + ], + "type": "inline_equation", + "content": "\\{(D^i,X^i,r^i)\\}_{i = 1}^N" + }, + { + "bbox": [ + 55, + 72, + 296, + 157 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 164, + 191, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 164, + 191, + 177 + ], + "spans": [ + { + "bbox": [ + 55, + 164, + 191, + 177 + ], + "type": "text", + "content": "3.3. 
Multi-Modal Alignment" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 182, + 296, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 182, + 296, + 289 + ], + "spans": [ + { + "bbox": [ + 55, + 182, + 296, + 289 + ], + "type": "text", + "content": "As noted, Geo4D predicts several non-independent geometric modalities. Furthermore, processing all frames of a long monocular video simultaneously with a video diffusion model is computationally prohibitive. Therefore, during inference, we use a temporal sliding window that segments the video into multiple overlapping clips, with partial overlap to facilitate joining them. The goal of this section is to fuse the resulting multi-modal and multi-window data into a single, coherent reconstruction of the entire video." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": "Temporal sliding window. 
Given a video " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{I} = \\{\\pmb{I}^i\\}_{i=1}^N" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": " frames, we divide it into several video clips " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{G} = \\{g^k\\}" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "k \\in S" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": ", where each clip " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "g^k" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": " contains " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": " frames " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "\\{I^i\\}_{i=k}^{k+V-1}" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": ", and the set of starting indices is " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{S} = \\{0, s, 2s, \\ldots, \\left\\lfloor \\frac{N-V}{s} \\right\\rfloor s\\} \\cup \\{N-V\\}" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": ". 
Here, " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": " is the sliding window stride. The final term " + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "inline_equation", + "content": "\\{N-V\\}" + }, + { + "bbox": [ + 55, + 292, + 296, + 376 + ], + "type": "text", + "content": " ensures that the last clip always includes the final frames of the video." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 380, + 296, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 380, + 296, + 475 + ], + "spans": [ + { + "bbox": [ + 55, + 380, + 296, + 475 + ], + "type": "text", + "content": "Alignment objectives. First, given the predicted point maps " + }, + { + "bbox": [ + 55, + 380, + 296, + 475 + ], + "type": "inline_equation", + "content": "X^{i,g}" + }, + { + "bbox": [ + 55, + 380, + 296, + 475 + ], + "type": "text", + "content": " for each frame " + }, + { + "bbox": [ + 55, + 380, + 296, + 475 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 380, + 296, + 475 + ], + "type": "text", + "content": " in each video clip " + }, + { + "bbox": [ + 55, + 380, + 296, + 475 + ], + "type": "inline_equation", + "content": "g \\in \\mathcal{G}" + }, + { + "bbox": [ + 55, + 380, + 296, + 475 + ], + "type": "text", + "content": ", we derive corresponding globally aligned point maps in world coordinates, as well as the relative camera motion and scale parameters. We denote these quantities with the p subscript to emphasize that they are inferred from the point map predictions. 
To do so, we extend the pairwise global alignment loss from DUSt3R to a group-wise one:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 483, + 294, + 525 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 483, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 61, + 483, + 294, + 525 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {p}} \\left(\\boldsymbol {X}, \\lambda_ {\\mathrm {p}} ^ {g}, \\boldsymbol {P} _ {\\mathrm {p}} ^ {g}\\right) = \\sum_ {g \\in \\mathcal {G}} \\sum_ {i \\in g} \\sum_ {u v} \\left\\| \\frac {\\boldsymbol {X} _ {u v} ^ {i} - \\lambda_ {\\mathrm {p}} ^ {g} \\boldsymbol {P} _ {\\mathrm {p}} ^ {g} \\boldsymbol {X} _ {u v} ^ {i , g}}{\\boldsymbol {\\sigma} _ {u v} ^ {i , g}} \\right\\| _ {1}, \\tag {4}", + "image_path": "82fafccbf5422e0e503459b4eec75c5222de00b4abd32f90963cc3f024704a27.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "spans": [ + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{p}}^{g}" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{p}}^{g} = [\\mathbf{R}_{\\mathrm{p}}^{g}|\\beta_{\\mathrm{p}}^{g}]" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " denote the group-wise scale and transformation matrix that align the group-relative point maps " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "X^{i,g}" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " to the point maps " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 
+ ], + "type": "inline_equation", + "content": "X^i" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " expressed in the global reference frame. " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "\\sigma_{uv}^{i,g}" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " denotes the uncertainty of the point map for frame " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " in group " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " at pixel " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "(u,v)" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": ". 
We further parameterize each of these point maps as " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "X_{uv}^{i} = \\mathbf{R}_{\\mathrm{p}}^{i^{\\top}}\\mathbf{K}_{\\mathrm{p}}^{i^{-1}}D_{\\mathrm{p},uv}^{i^{-1}}(u,v,1) + o_{\\mathrm{p}}^{i}" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " in terms of each camera's calibration " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{\\mathrm{p}}^{i}" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": ", world-to-camera rotation " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_{\\mathrm{p}}^{i}" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": ", and center " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "o_{\\mathrm{p}}^{i}" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " expressed in the global reference frame, and the disparity map " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{p}}^{i}" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": ". Substituting this expression into the loss function (4) and minimizing it, we can thus recover " + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{\\mathrm{p}}^{i},\\mathbf{R}_{\\mathrm{p}}^{i},o_{\\mathrm{p}}^{i}, D_{\\mathrm{p}}^{i},\\lambda_{\\mathrm{p}}^{g},P_{\\mathrm{p}}^{g}" + }, + { + "bbox": [ + 55, + 525, + 296, + 674 + ], + "type": "text", + "content": " from the predicted point maps." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 676, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 676, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 676, + 296, + 713 + ], + "type": "text", + "content": "The steps above infer the disparity maps " + }, + { + "bbox": [ + 55, + 676, + 296, + 713 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{p}}^{i}" + }, + { + "bbox": [ + 55, + 676, + 296, + 713 + ], + "type": "text", + "content": " from the point maps, but the model also predicts disparity maps " + }, + { + "bbox": [ + 55, + 676, + 296, + 713 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{d}}^{i}" + }, + { + "bbox": [ + 55, + 676, + 296, + 713 + ], + "type": "text", + "content": " directly, where the d subscript denotes disparity prediction." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 72, + 503, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 503, + 84 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 503, + 84 + ], + "type": "text", + "content": "We introduce the following loss to align them:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 322, + 90, + 555, + 118 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 90, + 555, + 118 + ], + "spans": [ + { + "bbox": [ + 322, + 90, + 555, + 118 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {d}} \\left(\\boldsymbol {D} _ {\\mathrm {p}}, \\lambda_ {\\mathrm {d}} ^ {g}, \\beta_ {\\mathrm {d}} ^ {g}\\right) = \\sum_ {g \\in \\mathcal {G}} \\sum_ {i \\in g} \\left\\| \\boldsymbol {D} _ {\\mathrm {p}} ^ {i} - \\lambda_ {\\mathrm {d}} ^ {g} \\boldsymbol {D} _ {d} ^ {i, g} - \\beta_ {\\mathrm {d}} ^ {g} \\right\\| _ {1}, \\tag {5}", + "image_path": "105a405ac4ff6c789f6798e930d20e77df96678809918788f2d6647f5316cbf7.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 124, + 550, + 137 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 124, + 550, + 137 + ], + "spans": [ + { + "bbox": [ + 313, + 124, + 550, + 137 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 124, + 550, + 137 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{d}}^{g}" + }, + { + "bbox": [ + 313, + 124, + 550, + 137 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 124, + 550, + 137 + ], + "type": "inline_equation", + "content": "\\beta_{\\mathrm{d}}^{g}" + }, + { + "bbox": [ + 313, + 124, + 550, + 137 + ], + "type": "text", + "content": " are optimized scale and shift parameters." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "spans": [ + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "text", + "content": "Finally, the ray maps " + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "inline_equation", + "content": "\\pmb{r}" + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "text", + "content": " also encode camera pose. 
To align them with the global camera parameters " + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "inline_equation", + "content": "(\\mathbf{R}_{\\mathrm{p}},\\mathbf{K}_{\\mathrm{p}},\\boldsymbol{o}_{\\mathrm{p}})" + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "text", + "content": " obtained from the point map, we first solve an optimization problem to extract the camera parameters from the ray map " + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "inline_equation", + "content": "\\pmb{r}^{i,g} = \\langle \\pmb{d}^{i,g},\\pmb{m}^{i,g}\\rangle" + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "text", + "content": " for each group " + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "text", + "content": " at frame " + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "text", + "content": ". Following Ray Diffusion [114], the camera center " + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "inline_equation", + "content": "\\pmb{o}_{\\mathrm{c}}^{i,g}" + }, + { + "bbox": [ + 313, + 137, + 555, + 232 + ], + "type": "text", + "content": " is solved by finding the 3D world coordinate closest to the intersection of all rays:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 339, + 238, + 555, + 263 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 238, + 555, + 263 + ], + "spans": [ + { + "bbox": [ + 339, + 238, + 555, + 263 + ], + "type": "interline_equation", + "content": "\\boldsymbol {o} _ {\\mathrm {c}} ^ {i, g} = \\arg \\min _ {\\boldsymbol {p} \\in \\mathbb {R} ^ {3}} \\sum_ {u \\in H, v \\in W} \\| \\boldsymbol {p} \\times \\boldsymbol {d} _ {u v} ^ {i, g} - \\boldsymbol {m} _ {u v} ^ {i, g} \\| ^ {2}. 
\\tag {6}", + "image_path": "349163f4532c8b2177783f9f58045e52c7cd8ec68e668b9f16e1ca2f4a326abc.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 270, + 554, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 270, + 554, + 306 + ], + "spans": [ + { + "bbox": [ + 313, + 270, + 554, + 306 + ], + "type": "text", + "content": "The camera extrinsics are solved by optimizing for the matrix " + }, + { + "bbox": [ + 313, + 270, + 554, + 306 + ], + "type": "inline_equation", + "content": "\\mathbf{H}" + }, + { + "bbox": [ + 313, + 270, + 554, + 306 + ], + "type": "text", + "content": " that transforms the predicted per-pixel ray directions " + }, + { + "bbox": [ + 313, + 270, + 554, + 306 + ], + "type": "inline_equation", + "content": "d_{uv}^{i,g}" + }, + { + "bbox": [ + 313, + 270, + 554, + 306 + ], + "type": "text", + "content": " to the ray directions " + }, + { + "bbox": [ + 313, + 270, + 554, + 306 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{uv}" + }, + { + "bbox": [ + 313, + 270, + 554, + 306 + ], + "type": "text", + "content": " of a canonical camera:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 345, + 312, + 555, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 312, + 555, + 338 + ], + "spans": [ + { + "bbox": [ + 345, + 312, + 555, + 338 + ], + "type": "interline_equation", + "content": "\\mathbf {H} ^ {i, g} = \\underset {\\| \\mathbf {H} \\| = 1} {\\arg \\min } \\sum_ {u \\in H, v \\in W} \\left\\| \\mathbf {H} d _ {u v} ^ {i, g} \\times \\mathbf {u} _ {u v} \\right\\|. 
\\tag {7}", + "image_path": "b89cb29cbc557e4a2704facd1add616f57970c058a17ada14246d706fded2bf7.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 346, + 555, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 346, + 555, + 382 + ], + "spans": [ + { + "bbox": [ + 313, + 346, + 555, + 382 + ], + "type": "text", + "content": "Then the world-to-camera rotation matrix " + }, + { + "bbox": [ + 313, + 346, + 555, + 382 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_c^{i,g}" + }, + { + "bbox": [ + 313, + 346, + 555, + 382 + ], + "type": "text", + "content": " and intrinsic matrix " + }, + { + "bbox": [ + 313, + 346, + 555, + 382 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_c^{i,g}" + }, + { + "bbox": [ + 313, + 346, + 555, + 382 + ], + "type": "text", + "content": " can be solved using the RQ-decomposition of " + }, + { + "bbox": [ + 313, + 346, + 555, + 382 + ], + "type": "inline_equation", + "content": "\\mathbf{H}^{i,g}" + }, + { + "bbox": [ + 313, + 346, + 555, + 382 + ], + "type": "text", + "content": ". Finally, the camera trajectory alignment loss is:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 387, + 553, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 387, + 553, + 449 + ], + "spans": [ + { + "bbox": [ + 318, + 387, + 553, + 449 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {c}} \\left(\\mathbf {R} _ {\\mathrm {p}}, \\boldsymbol {o} _ {\\mathrm {p}}, \\mathbf {R} _ {\\mathrm {c}} ^ {g}, \\beta_ {\\mathrm {c}} ^ {g}, \\lambda_ {\\mathrm {c}} ^ {g}\\right) = \\sum_ {g \\in \\mathcal {G}} \\sum_ {i \\in g} \\left(\\left\\| \\mathbf {R} _ {\\mathrm {p}} ^ {i ^ {\\top}} \\mathbf {R} _ {\\mathrm {c}} ^ {g} \\mathbf {R} _ {\\mathrm {c}} ^ {i, g} - \\boldsymbol {I} \\right\\| _ {\\mathrm {f}} \\right. \\\\ \\left. 
+ \\left\\| \\lambda_ {\\mathrm {c}} ^ {g} \\boldsymbol {o} _ {\\mathrm {c}} ^ {i, g} + \\beta_ {\\mathrm {c}} ^ {g} - \\boldsymbol {o} _ {\\mathrm {p}} ^ {i} \\right\\| _ {2}\\right), \\tag {8} \\\\ \\end{array}", + "image_path": "665a34df6fe043aa68614ddaf1862d09d85da07e4a04e260e1736500a73a4d19.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 456, + 554, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 456, + 554, + 517 + ], + "spans": [ + { + "bbox": [ + 313, + 456, + 554, + 517 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 456, + 554, + 517 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{c}}^{g}, \\beta_{\\mathrm{c}}^{g}, \\lambda_{\\mathrm{c}}^{g}" + }, + { + "bbox": [ + 313, + 456, + 554, + 517 + ], + "type": "text", + "content": " are learnable group-wise rotation matrix, translation vector, and scale, respectively, to align the global camera trajectory " + }, + { + "bbox": [ + 313, + 456, + 554, + 517 + ], + "type": "inline_equation", + "content": "(\\mathbf{R}_p, \\mathbf{o}_p)" + }, + { + "bbox": [ + 313, + 456, + 554, + 517 + ], + "type": "text", + "content": " and the predicted ones " + }, + { + "bbox": [ + 313, + 456, + 554, + 517 + ], + "type": "inline_equation", + "content": "(\\mathbf{R}_c, \\mathbf{o}_c)" + }, + { + "bbox": [ + 313, + 456, + 554, + 517 + ], + "type": "text", + "content": ". 
Following MonST3R [113], we also use a loss to smooth the camera trajectory:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 522, + 553, + 564 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 522, + 553, + 564 + ], + "spans": [ + { + "bbox": [ + 316, + 522, + 553, + 564 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {s}} \\left(\\mathbf {R} _ {\\mathrm {p}}, \\boldsymbol {o} _ {p}\\right) = \\sum_ {i = 1} ^ {N} \\left(\\left\\| \\mathbf {R} _ {\\mathrm {p}} ^ {i ^ {\\top}} \\mathbf {R} _ {\\mathrm {p}} ^ {i + 1} - \\boldsymbol {I} \\right\\| _ {\\mathrm {f}} + \\left\\| \\boldsymbol {o} _ {\\mathrm {p}} ^ {i + 1} - \\boldsymbol {o} _ {\\mathrm {p}} ^ {i} \\right\\| _ {2}\\right). \\tag {9}", + "image_path": "e500ec47bdd68dbe13dcb0224c81e86421f92739745664a0c0fbbc0aeb1224c8.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 565, + 554, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 565, + 554, + 588 + ], + "spans": [ + { + "bbox": [ + 313, + 565, + 554, + 588 + ], + "type": "text", + "content": "The final optimization objective is the weighted combination of the losses above:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 356, + 596, + 553, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 356, + 596, + 553, + 609 + ], + "spans": [ + { + "bbox": [ + 356, + 596, + 553, + 609 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {a l l}} = \\alpha_ {1} \\mathcal {L} _ {\\mathrm {p}} + \\alpha_ {2} \\mathcal {L} _ {\\mathrm {d}} + \\alpha_ {3} \\mathcal {L} _ {\\mathrm {c}} + \\alpha_ {4} \\mathcal {L} _ {\\mathrm {s}}. 
\\tag {10}", + "image_path": "399f0f33a67ecd0f9b30283d31a8486770d71fd31e3285f63b9dbb90ca1b6c31.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "content": "A note on the invariants. The model predicts point maps, disparity maps, and ray map origins up to scale, as this cannot be uniquely determined from a monocular video. The disparity map is also recovered up to a translation, which discounts the focal length (this is sometimes difficult to estimate due to the dolly zoom effect). Likewise, the ray map origin is recovered up to a shift, necessary to allow normalizing these maps." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 65, + 70, + 545, + 218 + ], + "blocks": [ + { + "bbox": [ + 65, + 70, + 545, + 218 + ], + "lines": [ + { + "bbox": [ + 65, + 70, + 545, + 218 + ], + "spans": [ + { + "bbox": [ + 65, + 70, + 545, + 218 + ], + "type": "table", + "html": "
CategoryMethodSintel [5]Bonn [55]KITTI [17]
Abs Rel ↓δ < 1.25 ↑Abs Rel ↓δ < 1.25 ↑Abs Rel ↓δ < 1.25 ↑
Single-frame depthMarigold [28]0.53251.50.09193.10.14979.6
Depth-Anything-V2 [106]0.36755.40.10692.10.14080.4
Video depthNVDS [95]0.40848.30.16776.60.25358.8
ChronoDepth [70]0.68748.60.10091.10.16775.9
DepthCrafter* [22]0.27069.70.07197.20.10489.6
Video depth & Camera poseRobust-CVD [32]0.70347.8
CasualSAM [116]0.38754.70.16973.70.24662.2
MonST3R [113]0.33558.50.06396.40.10489.5
Geo4D (Ours)0.20573.50.05997.20.08693.7
", + "image_path": "c076f88109dab7c9696980c607a9ee0e70c12cc68a7a1debc504756168c7c010.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 219, + 555, + 252 + ], + "lines": [ + { + "bbox": [ + 55, + 219, + 555, + 252 + ], + "spans": [ + { + "bbox": [ + 55, + 219, + 555, + 252 + ], + "type": "text", + "content": "Table 1. Video depth estimation on Sintel [5], Bonn [55] and KITTI [17] datasets. We follow the evaluation protocols established in recent MonST3R [113] for a fair comparison. Notably, results for DepthCrafter* are reported from its latest version (v1.0.1). The Best and the second best results are highlighted." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 258, + 138, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 258, + 138, + 272 + ], + "spans": [ + { + "bbox": [ + 55, + 258, + 138, + 272 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 277, + 183, + 292 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 277, + 183, + 292 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 183, + 292 + ], + "type": "text", + "content": "4.1. Experimental Settings" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 296, + 296, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 296, + 296, + 369 + ], + "spans": [ + { + "bbox": [ + 55, + 296, + 296, + 369 + ], + "type": "text", + "content": "Training datasets. Geo4D is trained exclusively on synthetic datasets, yet demonstrates strong generalization to real-world videos. Specifically, we use five synthetic datasets for training: Spring [50], BEDLAM [2], PointOdyssey [119], TarTanAir [93], and VirtualKitti [6]. See the Supp. Mat Tab. 5 for details." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "spans": [ + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "text", + "content": "Training. Our Geo4D is initialized with the weights of DynamiCrafter [102] and trained using AdamW [48] with a learning rate of " + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "text", + "content": " and a batch size of 32. We use a progressive training strategy to improve convergence and stability. First, we train the model to generate a single geometric modality, i.e., the point maps, at a fixed resolution of " + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "inline_equation", + "content": "512 \\times 320" + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "text", + "content": ". Next, we introduce a multi-resolution training scheme to improve generalization and robustness, which includes various resolutions: " + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "inline_equation", + "content": "512 \\times 384" + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "inline_equation", + "content": "512 \\times 320" + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "inline_equation", + "content": "576 \\times 256" + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "inline_equation", + "content": "640 \\times 192" + }, + { + "bbox": [ + 55, + 371, + 296, + 527 + ], + "type": "text", + "content": ". 
Finally, we progressively add additional geometric modalities, i.e., the ray and depth maps. Training is conducted on 4 NVIDIA H100 GPUs with a total training time of approximately one week." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "spans": [ + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "text", + "content": "Inference. As described in Sec. 3.2, given an " + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "text", + "content": "-frame video as input, we first split it into overlapping clips " + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "text", + "content": ", each containing " + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "inline_equation", + "content": "V = 16" + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "text", + "content": " frames, with a stride of " + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "inline_equation", + "content": "s = 4" + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "text", + "content": ". Each video clip is encoded and fed to the diffusion model to sample multi-modal 4D parameters " + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "inline_equation", + "content": "(X^{i,g}, D^{i,g}, r^{i,g})" + }, + { + "bbox": [ + 55, + 529, + 296, + 637 + ], + "type": "text", + "content": " for the video. For sampling, we use DDIM [77] with 5 steps. Finally, the alignment algorithm in Sec. 3.2 is used to fuse the clips into a globally coherent 4D reconstruction of the entire video." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 647, + 191, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 647, + 191, + 661 + ], + "spans": [ + { + "bbox": [ + 55, + 647, + 191, + 661 + ], + "type": "text", + "content": "4.2. Video Depth Estimation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "content": "Testing data. Our hypothesis is that, despite being trained on synthetic data, our model can generalize well to out-of-distribution synthetic and real data, as it is based on a pre-trained video diffusion model. To test this hypothe" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 258, + 555, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 258, + 555, + 368 + ], + "spans": [ + { + "bbox": [ + 313, + 258, + 555, + 368 + ], + "type": "text", + "content": "sis, we evaluate our model on three benchmarks: Sintel [5] is a synthetic dataset that provides accurate depth annotations, covering diverse scenes with complex camera motion. KITTI [17] is a large driving dataset collected using stereo cameras and LiDAR sensors. Bonn [55] focuses on dynamic indoor scenes. To ensure fair comparisons, we follow the evaluation protocol used by MonST3R [113], where depth sequences are uniformly sampled from the datasets, extracting 50-110 frames per sequence for evaluation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 374, + 556, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 374, + 556, + 495 + ], + "spans": [ + { + "bbox": [ + 313, + 374, + 556, + 495 + ], + "type": "text", + "content": "Metrics. 
Following the standard affine-invariant depth evaluation protocol [65], we align the predicted video depth with the ground-truth depth before computing metrics. However, unlike single-image depth estimation [28, 105, 106], where depth alignment is performed per frame, we enforce global scale consistency by applying a single scale and shift across the entire video sequence. For quantitative evaluation, we adopt two widely used depth metrics: absolute relative error (Abs Rel) and the percentage of inlier points (with a threshold value of " + }, + { + "bbox": [ + 313, + 374, + 556, + 495 + ], + "type": "inline_equation", + "content": "\\delta < 1.25" + }, + { + "bbox": [ + 313, + 374, + 556, + 495 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 502, + 556, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 502, + 556, + 575 + ], + "spans": [ + { + "bbox": [ + 313, + 502, + 556, + 575 + ], + "type": "text", + "content": "Baselines. We compare Geo4D to state-of-the-art single-frame depth estimation methods (Marigold [28] and Depth-Anything-V2 [106]), video depth prediction (NVDS [95], ChronoDepth [70], and DepthCrafter [22]), and joint video depth and camera pose prediction (Robust-CVD [32], CausalSAM [116], and MonST3R [113])." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 582, + 557, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 557, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 557, + 715 + ], + "type": "text", + "content": "Results. As shown in Table 1, all versions of Geo4D outperform state-of-the-art methods by a large margin. This includes DepthCrafter [22] and MonST3R [113], the most recent video depth diffusion model and the dynamic extension of DUSt3R to dynamic scenes, respectively. 
Notably, while both Geo4D and DepthCrafter are based on the same video diffusion model (DynamiCrafter), our model outperforms DepthCrafter in Abs Rel by " + }, + { + "bbox": [ + 313, + 582, + 557, + 715 + ], + "type": "inline_equation", + "content": "24.0\\%" + }, + { + "bbox": [ + 313, + 582, + 557, + 715 + ], + "type": "text", + "content": " on Sintel and " + }, + { + "bbox": [ + 313, + 582, + 557, + 715 + ], + "type": "inline_equation", + "content": "17.3\\%" + }, + { + "bbox": [ + 313, + 582, + 557, + 715 + ], + "type": "text", + "content": " on KITTI, despite solving a more general problem. Qualitatively, Fig. 3 shows that Geo4D achieves more consistent results, especially for fast-moving objects." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 76, + 123, + 355 + ], + "blocks": [ + { + "bbox": [ + 56, + 76, + 123, + 355 + ], + "lines": [ + { + "bbox": [ + 56, + 76, + 123, + 355 + ], + "spans": [ + { + "bbox": [ + 56, + 76, + 123, + 355 + ], + "type": "image", + "image_path": "31fe055d0b9532f244e4df7357114bae28839107d39783d32cc77d5fe458cccd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 360, + 555, + 393 + ], + "lines": [ + { + "bbox": [ + 55, + 360, + 555, + 393 + ], + "spans": [ + { + "bbox": [ + 55, + 360, + 555, + 393 + ], + "type": "text", + "content": "Figure 3. Qualitative results comparing Geo4D with MonST3R [113]. 
Attributed to our group-wise inference manner and prior geometry knowledge from pretrained video diffusion, our model successfully produces consistent 4D geometry under fast motion (first row) and deceptive reflection in the water (second row)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 136, + 76, + 203, + 355 + ], + "blocks": [ + { + "bbox": [ + 136, + 76, + 203, + 355 + ], + "lines": [ + { + "bbox": [ + 136, + 76, + 203, + 355 + ], + "spans": [ + { + "bbox": [ + 136, + 76, + 203, + 355 + ], + "type": "image", + "image_path": "da603b61e692742b8f3125a2a34baeb6c0ea078414c151332d08fac574719ceb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 205, + 75, + 339, + 355 + ], + "blocks": [ + { + "bbox": [ + 205, + 75, + 339, + 355 + ], + "lines": [ + { + "bbox": [ + 205, + 75, + 339, + 355 + ], + "spans": [ + { + "bbox": [ + 205, + 75, + 339, + 355 + ], + "type": "image", + "image_path": "6b6c4f3a7661332f8da800776b38b53b84bf480635decdc6034a431285270ef8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 342, + 77, + 410, + 355 + ], + "blocks": [ + { + "bbox": [ + 342, + 77, + 410, + 355 + ], + "lines": [ + { + "bbox": [ + 342, + 77, + 410, + 355 + ], + "spans": [ + { + "bbox": [ + 342, + 77, + 410, + 355 + ], + "type": "image", + "image_path": "f70a66213219ec9125bce35fe499bbcd826887e216833dca0c34aec4136a16f0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 410, + 76, + 553, + 355 + ], + "blocks": [ + { + "bbox": [ + 410, + 76, + 553, + 355 + ], + "lines": [ + { + "bbox": [ + 410, + 76, + 553, + 355 + ], + "spans": [ + { + "bbox": [ + 410, + 76, + 553, + 355 + ], + "type": "image", + "image_path": 
"73b2bb4a75fa350981437555d26a2196695e4f249a9c9c3f725920b7e5eed31f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 402, + 194, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 402, + 194, + 414 + ], + "spans": [ + { + "bbox": [ + 55, + 402, + 194, + 414 + ], + "type": "text", + "content": "4.3. Camera Pose Estimation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 420, + 295, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 420, + 295, + 588 + ], + "spans": [ + { + "bbox": [ + 54, + 420, + 295, + 588 + ], + "type": "text", + "content": "Setup. We evaluate the performance of Geo4D on both the synthetic Sintel [5] dataset and the realistic TUM-dynamics [78] dataset. We follow the same evaluation protocol as in MonST3R [113]. Specifically, on Sintel, we select 14 dynamic sequences, and for TUM-dynamics, we sample the first 90 frames of each sequence with a temporal stride of 3. After aligning the predicted camera trajectory with the ground truth using the Umayama algorithm, we calculate three commonly used metrics: Absolute Translation Error (ATE), Relative Translation Error (RPE-T), and Relative Rotation Error (RPE-R). We compare our method with other state-of-the-art discriminative methods, which jointly predict camera pose and depth, including Robust-CVD [32], CausalSAM [116], and MonST3R [113]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 590, + 295, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 590, + 295, + 662 + ], + "spans": [ + { + "bbox": [ + 55, + 590, + 295, + 662 + ], + "type": "text", + "content": "Results. To the best of our knowledge, Geo4D is the first method that uses a generative model to estimate camera parameters in a dynamic scene. As shown in Tab. 
2, compared to existing non-generative alternatives, we achieve much better camera rotation prediction (RPE-R) and comparable camera translation prediction (ATE and RPE-T)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 671, + 191, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 671, + 191, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 671, + 191, + 685 + ], + "type": "text", + "content": "4.4. Qualitative Comparison" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": "4D reconstruction. We compare Geo4D with the state-of-the-art MonST3R method on the DAVIS [23] dataset. Up-" + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 314, + 401, + 553, + 471 + ], + "blocks": [ + { + "bbox": [ + 314, + 401, + 553, + 471 + ], + "lines": [ + { + "bbox": [ + 314, + 401, + 553, + 471 + ], + "spans": [ + { + "bbox": [ + 314, + 401, + 553, + 471 + ], + "type": "table", + "html": "
MethodSintelTUM-dynamics
ATE ↓RPE-T ↓RPE-R ↓ATE ↓RPE-T ↓RPE-R ↓
Robust-CVD [32]0.3600.1543.4430.1530.0263.528
CasualSAM [116]0.1410.0350.6150.0710.0101.712
MonST3R [113]0.1080.0420.7320.0630.0091.217
Geo4D (Ours)0.1850.0630.5470.0730.0200.635
", + "image_path": "7038d9900452bdd46bac1ace91e6854e016dc9bd721e4b5891da5d7169423852.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 472, + 555, + 504 + ], + "lines": [ + { + "bbox": [ + 313, + 472, + 555, + 504 + ], + "spans": [ + { + "bbox": [ + 313, + 472, + 555, + 504 + ], + "type": "text", + "content": "Table 2. Quantitative evaluation for camera pose estimation. We achieve comparable camera pose estimation performance with other discriminative SOTA methods." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 517, + 555, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 517, + 555, + 661 + ], + "spans": [ + { + "bbox": [ + 313, + 517, + 555, + 661 + ], + "type": "text", + "content": "grading from pairwise alignment as in MonST3R to our group-wise alignment improves temporal consistency, leading to a more stable and globally coherent 4D reconstruction of point maps and camera trajectory, particularly in highly dynamic scenes. As shown in the top row of Fig. 3, Geo4D successfully tracks the racing car in 4D, whereas MonST3R struggles due to the rapid motion between pairs of images. Furthermore, likely due to the strong prior captured by the pre-trained video generative model, Geo4D correctly reconstructs the reflection of the flamingo in the water (second row in Fig. 3), whereas MonST3R misinterprets the reflection as a foreground object, resulting in incorrect depth." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "type": "text", + "content": "Video depth prediction. We compare Geo4D with state-of-the-art video depth predictors MonST3R [113] and DepthCrafter [22] on the Sintel [5] dataset. 
Qualitatively, Geo4D produces more detailed geometry, for instance for" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 70, + 555, + 171 + ], + "blocks": [ + { + "bbox": [ + 57, + 70, + 555, + 171 + ], + "lines": [ + { + "bbox": [ + 57, + 70, + 555, + 171 + ], + "spans": [ + { + "bbox": [ + 57, + 70, + 555, + 171 + ], + "type": "image", + "image_path": "161ac3a2e22555c8828abbf3af34a702e6e0dbcb916f70948abb49701c679c34.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 56, + 205, + 553, + 290 + ], + "blocks": [ + { + "bbox": [ + 55, + 172, + 555, + 205 + ], + "lines": [ + { + "bbox": [ + 55, + 172, + 555, + 205 + ], + "spans": [ + { + "bbox": [ + 55, + 172, + 555, + 205 + ], + "type": "text", + "content": "Figure 4. Qualitative video depth results comparing Geo4D with MonST3R [113] and DepthCrafter [22]. Owing to our proposed multimodal training and alignment, as well as the prior knowledge from diffusion, our method can infer a more detailed structure (first row) and a more accurate spatial arrangement from video (second row)." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 56, + 205, + 553, + 290 + ], + "lines": [ + { + "bbox": [ + 56, + 205, + 553, + 290 + ], + "spans": [ + { + "bbox": [ + 56, + 205, + 553, + 290 + ], + "type": "image", + "image_path": "59fd5169451397ccf3bd7880e7258023dc7f0fc38ae0a0fd7f3567726904a1cf.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 292, + 555, + 326 + ], + "lines": [ + { + "bbox": [ + 55, + 292, + 555, + 326 + ], + "spans": [ + { + "bbox": [ + 55, + 292, + 555, + 326 + ], + "type": "text", + "content": "Table 3. Ablation study for the different modalities of the geometric representation on the Sintel [5] dataset. We demonstrate the effectiveness of our key design choices that both leverage multi-modality as additional training supervision signal and postprocess through our proposed multi-modal alignment algorithm will improve the overall performance." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 56, + 331, + 294, + 394 + ], + "blocks": [ + { + "bbox": [ + 56, + 331, + 294, + 394 + ], + "lines": [ + { + "bbox": [ + 56, + 331, + 294, + 394 + ], + "spans": [ + { + "bbox": [ + 56, + 331, + 294, + 394 + ], + "type": "table", + "html": "
Strides / frameVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
150.920.21372.40.2100.0920.574
81.240.21272.80.2220.0740.524
41.890.20573.50.1850.0630.547
23.260.20472.90.1810.0580.518
", + "image_path": "5364a2a6461c500f21711140d4a24a474dee3729cfe59351e73e5d692522a123.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 396, + 295, + 429 + ], + "lines": [ + { + "bbox": [ + 55, + 396, + 295, + 429 + ], + "spans": [ + { + "bbox": [ + 55, + 396, + 295, + 429 + ], + "type": "text", + "content": "Table 4. Ablation study for the temporal sliding window stride on the Sintel [5] dataset. There is a trade-off between performance and inference speed." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 436, + 296, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 436, + 296, + 473 + ], + "spans": [ + { + "bbox": [ + 55, + 436, + 296, + 473 + ], + "type": "text", + "content": "the rope on the stick in the first row of Fig. 4, and a better spatial arrangement between different dynamic objects, as shown in the second row of Fig. 4." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 480, + 149, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 480, + 149, + 493 + ], + "spans": [ + { + "bbox": [ + 55, + 480, + 149, + 493 + ], + "type": "text", + "content": "4.5. Ablation Study" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 498, + 295, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 295, + 521 + ], + "type": "text", + "content": "We ablate our key design choices and the effect of different modalities on the Sintel dataset." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 54, + 521, + 295, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 521, + 295, + 641 + ], + "spans": [ + { + "bbox": [ + 54, + 521, + 295, + 641 + ], + "type": "text", + "content": "We study the effect of multi-modality in Tab. 3. 
The three modalities—point map, disparity map, and ray map—can be used either at training or inference time, or both. The first two rows show that the diffusion model trained with point maps as a single modality performs worse in both video depth and camera pose estimation than the diffusion model trained with all three modalities. Therefore, the other two modalities, even if they can be seen as redundant, serve as additional supervisory signals during training, which improves the generalization ability of the diffusion model." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "type": "text", + "content": "We then investigate the effectiveness of our multi-modal alignment algorithm. Compared with the second to the fourth row in Tab. 3, which leverage only a single modality during inference, multi-modal alignment optimization (last row) achieves the best performance, showing the benefits of fusing the multiple modalities at inference time." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 332, + 555, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 332, + 555, + 440 + ], + "spans": [ + { + "bbox": [ + 313, + 332, + 555, + 440 + ], + "type": "text", + "content": "We ablate the sliding window stride in Tab. 4. Results improve with a shorter stride, in part because this means that more windows and estimates are averaged, reducing the variance of the predictions by the denoising diffusion model, which is stochastic. We choose stride " + }, + { + "bbox": [ + 313, + 332, + 555, + 440 + ], + "type": "inline_equation", + "content": "s = 4" + }, + { + "bbox": [ + 313, + 332, + 555, + 440 + ], + "type": "text", + "content": " for our main results to balance runtime and performance. 
Note that MonST3R [113] requires 2.41 seconds to process one frame under the same setting, so our method is 1.27 times faster than MonST3R [113]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 451, + 466, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 451, + 466, + 464 + ], + "spans": [ + { + "bbox": [ + 313, + 451, + 466, + 464 + ], + "type": "text", + "content": "5. Discussion and Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 471, + 554, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 471, + 554, + 579 + ], + "spans": [ + { + "bbox": [ + 313, + 471, + 554, + 579 + ], + "type": "text", + "content": "We have introduced Geo4D, a novel approach that adapts a video generator for dynamic 4D reconstruction. By building on a pre-trained video generator, Geo4D achieves excellent generalization to real data despite being trained only on synthetic 4D data. We have also demonstrated the benefits of predicting multiple modalities and fusing them at test time via optimization. Our model outperforms state-of-the-art methods on video depth and camera rotation prediction, particularly in challenging dynamic scenes." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 579, + 553, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 579, + 553, + 628 + ], + "spans": [ + { + "bbox": [ + 313, + 579, + 553, + 628 + ], + "type": "text", + "content": "Despite these successes, our approach has limitations. One is that the point map encoder-decoder is still not entirely accurate, which in turn is a bottleneck for the overall reconstruction quality." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 627, + 553, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 627, + 553, + 675 + ], + "spans": [ + { + "bbox": [ + 313, + 627, + 553, + 675 + ], + "type": "text", + "content": "Our approach also opens a path to integrating 4D geometry into video foundation models, e.g., to generate 3D animations from text, or to provide a more actionable signal when the video model is used as a proxy for a world model." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 677, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 553, + 713 + ], + "type": "text", + "content": "Acknowledgments. The authors of this work were supported by Clarendon Scholarship, ERC 101001212-UNION, and EPSRC EP/Z001811/1 SYN3D." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 91, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 91, + 296, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 91, + 296, + 167 + ], + "spans": [ + { + "bbox": [ + 66, + 91, + 296, + 167 + ], + "type": "text", + "content": "[1] Anas Awadalla, Irena Gao, Josh Gardner, Jack 
Hessel, Yusuf Hanafy, Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Shiori Sagawa, Jenia Jitsev, Simon Kornblith, Pang Wei Koh, Gabriel Ilharco, Mitchell Wortman, and Ludwig Schmidt. Openflamingo: An opensource framework for training large autoregressive vision-language models, 2023. 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 65, + 168, + 296, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 168, + 296, + 201 + ], + "spans": [ + { + "bbox": [ + 65, + 168, + 296, + 201 + ], + "type": "text", + "content": "[2] Michael J. Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. Bedlam: A synthetic dataset of bodies exhibiting detailed lifelike animated motion, 2023. 6, 15" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 65, + 202, + 295, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 202, + 295, + 257 + ], + "spans": [ + { + "bbox": [ + 65, + 202, + 295, + 257 + ], + "type": "text", + "content": "[3] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 65, + 258, + 296, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 258, + 296, + 323 + ], + "spans": [ + { + "bbox": [ + 65, + 258, + 296, + 323 + ], + "type": "text", + "content": "[4] Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 22563-22575, 2023. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 65, + 324, + 295, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 324, + 295, + 368 + ], + "spans": [ + { + "bbox": [ + 65, + 324, + 295, + 368 + ], + "type": "text", + "content": "[5] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. Black. A naturalistic open source movie for optical flow evaluation. In European Conference on Computer Vision (ECCV), 2012. 6, 7, 8, 15, 16" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 65, + 369, + 294, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 369, + 294, + 390 + ], + "spans": [ + { + "bbox": [ + 65, + 369, + 294, + 390 + ], + "type": "text", + "content": "[6] Yohann Cabon, Naila Murray, and Martin Humenberger. Virtual kitti 2, 2020. 6, 15" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 65, + 392, + 295, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 392, + 295, + 456 + ], + "spans": [ + { + "bbox": [ + 65, + 392, + 295, + 456 + ], + "type": "text", + "content": "[7] David Charatan, Sizhe Lester Li, Andrea Tagliasacchi, and Vincent Sitzmann. pixelsplat: 3d gaussian splats from image pairs for scalable generalizable 3d reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19457-19467, 2024. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 65, + 458, + 295, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 458, + 295, + 501 + ], + "spans": [ + { + "bbox": [ + 65, + 458, + 295, + 501 + ], + "type": "text", + "content": "[8] Xingyu Chen, Yue Chen, Yuliang Xiu, Andreas Geiger, and Anpei Chen. Easi3r: Estimating disentangled motion from dust3r without training. arXiv preprint arXiv:2503.24391, 2025. 
3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 65, + 502, + 295, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 502, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 65, + 502, + 295, + 555 + ], + "type": "text", + "content": "[9] Yuedong Chen, Haofei Xu, Chuanxia Zheng, Bohan Zhuang, Marc Pollefeys, Andreas Geiger, Tat-Jen Cham, and Jianfei Cai. MVSplat: efficient 3d gaussian splattering from sparse multi-view images. arXiv, 2403.14627, 2024. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 558, + 295, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 558, + 295, + 612 + ], + "spans": [ + { + "bbox": [ + 61, + 558, + 295, + 612 + ], + "type": "text", + "content": "[10] Yuedong Chen, Chuanxia Zheng, Haofei Xu, Bohan Zhuang, Andrea Vedaldi, Tat-Jen Cham, and Jianfei Cai. Mvsplat360: Feed-forward 360 scene synthesis from sparse views. In Neural Information Processing Systems (NeurIPS), 2024. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 61, + 613, + 295, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 613, + 295, + 668 + ], + "spans": [ + { + "bbox": [ + 61, + 613, + 295, + 668 + ], + "type": "text", + "content": "[11] Christopher B Choy, Danfei Xu, JunYoung Gwak, Kevin Chen, and Silvio Savarese. 3d-r2n2: A unified approach for single and multi-view 3d object reconstruction. In European conference on computer vision (ECCV), pages 628-644. Springer, 2016. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 61, + 670, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 670, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 61, + 670, + 295, + 713 + ], + "type": "text", + "content": "[12] Wen-Hsuan Chu, Lei Ke, and Katerina Fragkiadaki. Dreamscene4d: Dynamic multi-object scene generation from monocular videos. Advances in Neural Information Processing Systems (NeurIPS), 2024. 
2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 320, + 73, + 553, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 73, + 553, + 117 + ], + "spans": [ + { + "bbox": [ + 320, + 73, + 553, + 117 + ], + "type": "text", + "content": "[13] Yiquan Duan, Xianda Guo, and Zheng Zhu. Diffusion-depth: Diffusion denoising approach for monocular depth estimation. In European Conference on Computer Vision (ECCV), pages 432-449. Springer, 2024. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 320, + 118, + 553, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 118, + 553, + 183 + ], + "spans": [ + { + "bbox": [ + 320, + 118, + 553, + 183 + ], + "type": "text", + "content": "[14] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. In European Conference on Computer Vision (ECCV), pages 241-258. Springer, 2024. 3, 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 320, + 185, + 553, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 185, + 553, + 239 + ], + "spans": [ + { + "bbox": [ + 320, + 185, + 553, + 239 + ], + "type": "text", + "content": "[15] Ruiqi Gao, Aleksander Holynski, Philipp Henzler, Arthur Brussee, Ricardo Martin-Brualla, Pratul Srinivasan, Jonathan T Barron, and Ben Poole. Cat3d: Create anything in 3d with multi-view diffusion models. Advances in Neural Information Processing Systems (NeurIPS), 2024. 
3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 241, + 553, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 241, + 553, + 307 + ], + "spans": [ + { + "bbox": [ + 320, + 241, + 553, + 307 + ], + "type": "text", + "content": "[16] Songwei Ge, Seungjun Nah, Guilin Liu, Tyler Poon, Andrew Tao, Bryan Catanzaro, David Jacobs, Jia-Bin Huang, Ming-Yu Liu, and Yogesh Balaji. Preserve your own correlation: A noise prior for video diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 22930-22941, 2023. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 320, + 308, + 553, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 308, + 553, + 342 + ], + "spans": [ + { + "bbox": [ + 320, + 308, + 553, + 342 + ], + "type": "text", + "content": "[17] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The KITTI dataset. International Journal of Robotics Research (IJRR), 2013. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 320, + 343, + 553, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 343, + 553, + 385 + ], + "spans": [ + { + "bbox": [ + 320, + 343, + 553, + 385 + ], + "type": "text", + "content": "[18] Georgia Gkioxari, Jitendra Malik, and Justin Johnson. Mesh r-cnn. In Proceedings of the IEEE/CVF international conference on computer vision (CVPR), pages 9785-9795, 2019. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 320, + 388, + 553, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 388, + 553, + 453 + ], + "spans": [ + { + "bbox": [ + 320, + 388, + 553, + 453 + ], + "type": "text", + "content": "[19] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. 
In International Conference on Learning Representations (ICLR), 2024. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 455, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 455, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 320, + 455, + 553, + 498 + ], + "type": "text", + "content": "[20] Junlin Han, Filippos Kokkinos, and Philip Torr. Vfusion3d: Learning scalable 3d generative models from video diffusion models. In European Conference on Computer Vision (ECCV), pages 333-350. Springer, 2024. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 500, + 553, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 500, + 553, + 543 + ], + "spans": [ + { + "bbox": [ + 320, + 500, + 553, + 543 + ], + "type": "text", + "content": "[21] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. Neural Information Processing Systems (NeurIPS), 35:8633-8646, 2022. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 544, + 553, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 544, + 553, + 588 + ], + "spans": [ + { + "bbox": [ + 320, + 544, + 553, + 588 + ], + "type": "text", + "content": "[22] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 2, 6, 7, 8, 15" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 590, + 553, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 590, + 553, + 622 + ], + "spans": [ + { + "bbox": [ + 320, + 590, + 553, + 622 + ], + "type": "text", + "content": "[23] Jia-Bin Huang, Sing Bing Kang, Narendra Ahuja, and Johannes Kopf. Temporally coherent completion of dynamic video. In ACM, 2016. 
2, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 624, + 553, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 624, + 553, + 678 + ], + "spans": [ + { + "bbox": [ + 320, + 624, + 553, + 678 + ], + "type": "text", + "content": "[24] Matthias Innmann, Michael Zollhöfer, Matthias Nießner, Christian Theobalt, and Marc Stamminger. Volumedeform: Real-time volumetric non-rigid reconstruction. In European conference on computer vision (ECCV), pages 362-379. Springer, 2016. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 320, + 681, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 681, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 320, + 681, + 553, + 713 + ], + "type": "text", + "content": "[25] Tomas Jakab, Ruining Li, Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Farm3d: Learning articulated 3d animals by distilling 2d diffusion. In 2024 International" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 72, + 295, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 81, + 72, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 81, + 72, + 294, + 95 + ], + "type": "text", + "content": "Conference on 3D Vision (3DV), pages 852-861. IEEE, 2024. 
3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 97, + 295, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 97, + 295, + 140 + ], + "spans": [ + { + "bbox": [ + 61, + 97, + 295, + 140 + ], + "type": "text", + "content": "[26] Yanqin Jiang, Chaohui Yu, Chenjie Cao, Fan Wang, Weiming Hu, and Jin Gao. *Animate3d: Animating any 3d model with multi-view video diffusion*. Advances in Neural Information Processing Systems (NeurIPS), 2024. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 143, + 295, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 143, + 295, + 198 + ], + "spans": [ + { + "bbox": [ + 61, + 143, + 295, + 198 + ], + "type": "text", + "content": "[27] Zeren Jiang, Chen Guo, Manuel Kaufmann, Tianjian Jiang, Julien Valentin, Otmar Hilliges, and Jie Song. Multiply: Reconstruction of multiple people from monocular video in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 200, + 294, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 200, + 294, + 266 + ], + "spans": [ + { + "bbox": [ + 62, + 200, + 294, + 266 + ], + "type": "text", + "content": "[28] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9492-9502, 2024. 2, 3, 4, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 268, + 294, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 268, + 294, + 312 + ], + "spans": [ + { + "bbox": [ + 62, + 268, + 294, + 312 + ], + "type": "text", + "content": "[29] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 
3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4):139-1, 2023. 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 314, + 294, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 314, + 294, + 358 + ], + "spans": [ + { + "bbox": [ + 62, + 314, + 294, + 358 + ], + "type": "text", + "content": "[30] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dólar, and Ross Girshick. Segment anything, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 360, + 294, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 360, + 294, + 414 + ], + "spans": [ + { + "bbox": [ + 62, + 360, + 294, + 414 + ], + "type": "text", + "content": "[31] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 417, + 295, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 417, + 295, + 460 + ], + "spans": [ + { + "bbox": [ + 62, + 417, + 295, + 460 + ], + "type": "text", + "content": "[32] Johannes Kopf, Xuejian Rong, and Jia-Bin Huang. Robust consistent video depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1611-1621, 2021. 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 463, + 294, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 463, + 294, + 506 + ], + "spans": [ + { + "bbox": [ + 62, + 463, + 294, + 506 + ], + "type": "text", + "content": "[33] Akshay Krishnan, Xinchen Yan, Vincent Casser, and Abhijit Kundu. 
Orchid: Image latent diffusion for joint appearance and geometry generation. arXiv preprint arXiv:2501.13087, 2025. 3, 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 509, + 294, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 509, + 294, + 552 + ], + "spans": [ + { + "bbox": [ + 62, + 509, + 294, + 552 + ], + "type": "text", + "content": "[34] Jiahui Lei, Yijia Weng, Adam Harley, Leonidas Guibas, and Kostas Daniilidis. Mosca: Dynamic gaussian fusion from casual videos via 4d motion scaffolds. arXiv preprint arXiv:2405.17421, 2024. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 555, + 294, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 555, + 294, + 598 + ], + "spans": [ + { + "bbox": [ + 62, + 555, + 294, + 598 + ], + "type": "text", + "content": "[35] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. In European Conference on Computer Vision, pages 71-91. Springer, 2024. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 601, + 294, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 601, + 294, + 666 + ], + "spans": [ + { + "bbox": [ + 62, + 601, + 294, + 666 + ], + "type": "text", + "content": "[36] Jiahao Li, Hao Tan, Kai Zhang, Zexiang Xu, Fujun Luan, Yinghao Xu, Yicong Hong, Kalyan Sunkavalli, Greg Shakhnarovich, and Sai Bi. Instant3D: Fast text-to-3D with sparse-view generation and large reconstruction model. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 62, + 670, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 670, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 62, + 670, + 294, + 712 + ], + "type": "text", + "content": "[37] Xuanyi Li, Daquan Zhou, Chenxu Zhang, Shaodong Wei, Qibin Hou, and Ming-Ming Cheng. 
Sora generates videos with stunning geometrical consistency. arXiv, 2402.17403, 2024. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 320, + 73, + 553, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 73, + 553, + 128 + ], + "spans": [ + { + "bbox": [ + 320, + 73, + 553, + 128 + ], + "type": "text", + "content": "[38] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 320, + 129, + 553, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 129, + 553, + 184 + ], + "spans": [ + { + "bbox": [ + 320, + 129, + 553, + 184 + ], + "type": "text", + "content": "[39] Zhengqi Li, Qianqian Wang, Forrester Cole, Richard Tucker, and Noah Snavely. Dynibar: Neural dynamic image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4273-4284, 2023. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 320, + 186, + 553, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 186, + 553, + 251 + ], + "spans": [ + { + "bbox": [ + 320, + 186, + 553, + 251 + ], + "type": "text", + "content": "[40] Zhengqi Li, Richard Tucker, Forrester Cole, Qianqian Wang, Linyi Jin, Vickie Ye, Angjoo Kanazawa, Aleksander Holynski, and Noah Snavely. Megasam: Accurate, fast and robust structure and motion from casual dynamic videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 
3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 252, + 553, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 252, + 553, + 297 + ], + "spans": [ + { + "bbox": [ + 320, + 252, + 553, + 297 + ], + "type": "text", + "content": "[41] Chen-Hsuan Lin, Chen Kong, and Simon Lucey. Learning efficient point cloud generation for dense 3d object reconstruction. In proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2018. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 320, + 298, + 553, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 298, + 553, + 363 + ], + "spans": [ + { + "bbox": [ + 320, + 298, + 553, + 363 + ], + "type": "text", + "content": "[42] Chen-Hsuan Lin, Jun Gao, Luming Tang, Towaki Takikawa, Xiaohui Zeng, Xun Huang, Karsten Kreis, Sanja Fidler, Ming-Yu Liu, and Tsung-Yi Lin. Magic3d: High-resolution text-to-3d content creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 300–309, 2023. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 320, + 365, + 553, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 365, + 553, + 397 + ], + "spans": [ + { + "bbox": [ + 320, + 365, + 553, + 397 + ], + "type": "text", + "content": "[43] Youtian Lin, Zuozhuo Dai, Siyu Zhu, and Yao Yao. Gaussian-flow: 4d reconstruction with dynamic 3d gaussian particle. In CVPR, 2024. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 320, + 399, + 553, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 399, + 553, + 442 + ], + "spans": [ + { + "bbox": [ + 320, + 399, + 553, + 442 + ], + "type": "text", + "content": "[44] Fangfu Liu, Wenqiang Sun, Hanyang Wang, Yikai Wang, Haowen Sun, Junliang Ye, Jun Zhang, and Yueqi Duan. Reconx: Reconstruct any scene from sparse views with video diffusion model, 2024. 
3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 445, + 555, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 445, + 555, + 499 + ], + "spans": [ + { + "bbox": [ + 320, + 445, + 555, + 499 + ], + "type": "text", + "content": "[45] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proceedings of the IEEE/CVF international conference on computer vision (ICCV), pages 9298–9309, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 501, + 553, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 501, + 553, + 555 + ], + "spans": [ + { + "bbox": [ + 320, + 501, + 553, + 555 + ], + "type": "text", + "content": "[46] Yuan Liu, Cheng Lin, Zijiao Zeng, Xiaoxiao Long, Lingjie Liu, Taku Komura, and Wenping Wang. Syncdreamer: Generating multiview-consistent images from a single-view image. In The Twelfth International Conference on Learning Representations (ICLR), 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 557, + 553, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 557, + 553, + 632 + ], + "spans": [ + { + "bbox": [ + 320, + 557, + 553, + 632 + ], + "type": "text", + "content": "[47] Xiaoxiao Long, Yuan-Chen Guo, Cheng Lin, Yuan Liu, Zhiyang Dou, Lingjie Liu, Yuexin Ma, Song-Hai Zhang, Marc Habermann, Christian Theobalt, et al. Wonder3d: Single image to 3d using cross-domain diffusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 9970-9980, 2024. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 635, + 553, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 635, + 553, + 657 + ], + "spans": [ + { + "bbox": [ + 320, + 635, + 553, + 657 + ], + "type": "text", + "content": "[48] Ilya Loshchilov and Frank Hutter. 
Decoupled weight decay regularization. In ICLR, 2019. 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 658, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 553, + 713 + ], + "type": "text", + "content": "[49] Yuanxun Lu, Jingyang Zhang, Tian Fang, Jean-Daniel Nahmias, Yanghai Tsin, Long Quan, Xun Cao, Yao Yao, and Shiwei Li. Matrix3d: Large photogrammetry model all-in-one. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3, 4" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 72, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 61, + 72, + 294, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 72, + 294, + 127 + ], + "spans": [ + { + "bbox": [ + 61, + 72, + 294, + 127 + ], + "type": "text", + "content": "[50] Lukas Mehl, Jenny Schmalfuss, Azin Jahedi, Yaroslava Nalivayko, and Andres Bruhn. Spring: A high-resolution high-detail dataset and benchmark for scene flow, optical flow and stereo. In Proc. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 
6, 15" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 129, + 294, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 129, + 294, + 183 + ], + "spans": [ + { + "bbox": [ + 61, + 129, + 294, + 183 + ], + "type": "text", + "content": "[51] Luke Melas-Kyriazi, Iro Laina, Christian Rupprecht, and Andrea Vedaldi. Realfusion: 360deg reconstruction of any object from a single image. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 8446-8455, 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 184, + 295, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 184, + 295, + 228 + ], + "spans": [ + { + "bbox": [ + 62, + 184, + 295, + 228 + ], + "type": "text", + "content": "[52] B Mildenhall, PP Srinivasan, M Tancik, JT Barron, R Ramamoorthi, and R Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 228, + 294, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 228, + 294, + 282 + ], + "spans": [ + { + "bbox": [ + 62, + 228, + 294, + 282 + ], + "type": "text", + "content": "[53] Richard A Newcombe, Dieter Fox, and Steven M Seitz. Dynamicfusion: Reconstruction and tracking of non-rigid scenes in real-time. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 343-352, 2015. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 284, + 294, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 284, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 62, + 284, + 294, + 525 + ], + "type": "text", + "content": "[54] NVIDIA, Niket Agarwal, Arslan Ali, Maciej Bala, Yogesh Balaji, Erik Barker, Tiffany Cai, Prithvijit Chattopadhyay, Yongxin Chen, Yin Cui, Yifan Ding, Daniel Dworakowski, Jiaojiao Fan, Michele Fenzi, Francesco Ferroni, Sanja Fidler, Dieter Fox, Songwei Ge, Yunhao Ge, Jinwei Gu, Siddharth Gururani, Ethan He, Jiahui Huang, Jacob Huffman, Pooya Jannaty, Jingyi Jin, Seung Wook Kim, Gergely Klár, Grace Lam, Shiyi Lan, Laura Leal-Taixe, Anqi Li, Zhaoshuo Li, Chen-Hsuan Lin, Tsung-Yi Lin, Huan Ling, Ming-Yu Liu, Xian Liu, Alice Luo, Qianli Ma, Hanzi Mao, Kaichun Mo, Arsalan Mousavian, Seungjun Nah, Sriharsha Niverty, David Page, Despoina Paschalidou, Zeeshan Patel, Lindsey Pavao, Morteza Ramezanali, Fitsum Reda, Xiaowei Ren, Vasanth Rao Naik Sabavat, Ed Schmerling, Stella Shi, Bartosz Stefaniak, Shitao Tang, Lyne Tchapmi, Przemek Tredak, Wei-Cheng Tseng, Jibin Varghese, Hao Wang, Haoxiang Wang, Heng Wang, Ting-Chun Wang, Fangyin Wei, Xinyue Wei, Jay Zhangjie Wu, Jiashu Xu, Wei Yang, Lin Yen-Chen, Xiaohui Zeng, Yu Zeng, Jing Zhang, Qinsheng Zhang, Yuxuan Zhang, Qingqing Zhao and Artur Zolkowski. Cosmos world foundation model platform for physical ai. arXiv, 2501.03575, 2025. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 525, + 294, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 525, + 294, + 590 + ], + "spans": [ + { + "bbox": [ + 62, + 525, + 294, + 590 + ], + "type": "text", + "content": "[55] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguère, and C. Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. 
In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 7855-7862, 2019. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 592, + 294, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 592, + 294, + 656 + ], + "spans": [ + { + "bbox": [ + 62, + 592, + 294, + 656 + ], + "type": "text", + "content": "[56] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 658, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 658, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 62, + 658, + 294, + 713 + ], + "type": "text", + "content": "[57] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 2" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 320, + 73, + 553, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 73, + 553, + 138 + ], + "spans": [ + { + "bbox": [ + 320, + 73, + 553, + 138 + ], + "type": "text", + "content": "[58] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: a higher-dimensional representation for topologically varying neural radiance fields. ACM Transactions on Graphics (TOG), 40 (6):1-12, 2021. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 320, + 140, + 553, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 140, + 553, + 250 + ], + "spans": [ + { + "bbox": [ + 320, + 140, + 553, + 250 + ], + "type": "text", + "content": "[59] Jack Parker-Holder, Philip Ball, Jake Bruce, Vibhavari Dasagi, Kristian Holsheimer, Christos Kaplanis, Alexandre Moufarek, Guy Scully, Jeremy Shar, Jimmy Shi, Stephen Spencer, Jessica Yung, Michael Dennis, Sultan Kenjoyev, Shangbang Long, Vlad Mnih, Harris Chan, Maxime Gazeau, Bonnie Li, Fabio Pardo, Luyu Wang, Lei Zhang, Frederic Besse, Tim Harley, Anna Mitenkova, Jane Wang, Jeff Clune, Demis Hassabis, Raia Hadsell, Adrian Bolton, Satinder Singh, and Tim Rocktäschel. Genie 2: A large-scale foundation world model, 2024. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 320, + 251, + 553, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 251, + 553, + 296 + ], + "spans": [ + { + "bbox": [ + 320, + 251, + 553, + 296 + ], + "type": "text", + "content": "[60] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European conference on computer vision (ECCV), pages 523-540. Springer, 2020. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 320, + 297, + 553, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 297, + 553, + 341 + ], + "spans": [ + { + "bbox": [ + 320, + 297, + 553, + 341 + ], + "type": "text", + "content": "[61] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 
3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 320, + 342, + 553, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 342, + 553, + 397 + ], + "spans": [ + { + "bbox": [ + 320, + 342, + 553, + 397 + ], + "type": "text", + "content": "[62] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10318-10327, 2021. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 320, + 399, + 553, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 399, + 553, + 475 + ], + "spans": [ + { + "bbox": [ + 320, + 399, + 553, + 475 + ], + "type": "text", + "content": "[63] Lingteng Qiu, Guanying Chen, Xiaodong Gu, Qi Zuo, Mutian Xu, Yushuang Wu, Weihao Yuan, Zilong Dong, Liefeng Bo, and Xiaoguang Han. Richdreamer: A generalizable normal-depth diffusion model for detail richness in text-to-3d. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 9914–9925, 2024. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 320, + 477, + 553, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 477, + 553, + 544 + ], + "spans": [ + { + "bbox": [ + 320, + 477, + 553, + 544 + ], + "type": "text", + "content": "[64] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning (ICML), pages 8748-8763. PmLR, 2021. 
5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 320, + 545, + 553, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 545, + 553, + 599 + ], + "spans": [ + { + "bbox": [ + 320, + 545, + 553, + 599 + ], + "type": "text", + "content": "[65] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 44:1623-1637, 2019. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 601, + 553, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 601, + 553, + 656 + ], + "spans": [ + { + "bbox": [ + 320, + 601, + 553, + 656 + ], + "type": "text", + "content": "[66] Jiawei Ren, Kevin Xie, Ashkan Mirzaei, Hanxue Liang, Xiaohui Zeng, Karsten Kreis, Ziwei Liu, Antonio Torralba, Sanja Fidler, Seung Wook Kim, and Huan Ling. L4gm: Large 4d gaussian reconstruction model. Advances in Neural Information Processing Systems (NeurIPS), 2024. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 320, + 658, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 553, + 713 + ], + "type": "text", + "content": "[67] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 10684-10695, 2022. 
4" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 72, + 297, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 61, + 72, + 297, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 72, + 297, + 149 + ], + "spans": [ + { + "bbox": [ + 61, + 72, + 297, + 149 + ], + "type": "text", + "content": "[68] Kyle Sargent, Zizhang Li, Tanmay Shah, Charles Herrmann, Hong-Xing Yu, Yunzhi Zhang, Eric Ryan Chan, Dmitry Lagun, Li Fei-Fei, Deqing Sun, et al. Zeronvs: Zero-shot 360-degree view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9420–9429, 2024. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 151, + 297, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 151, + 297, + 217 + ], + "spans": [ + { + "bbox": [ + 61, + 151, + 297, + 217 + ], + "type": "text", + "content": "[69] Saurabh Saxena, Charles Herrmann, Junhwa Hur, Abhishek Kar, Mohammad Norouzi, Deqing Sun, and David J Fleet. The surprising effectiveness of diffusion models for optical flow and monocular depth estimation. Advances in Neural Information Processing Systems (NeurIPS), 36:39443-39469, 2023. 
3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 218, + 296, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 218, + 296, + 262 + ], + "spans": [ + { + "bbox": [ + 61, + 218, + 296, + 262 + ], + "type": "text", + "content": "[70] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. arXiv, 2406.01493, 2024. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 263, + 296, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 263, + 296, + 308 + ], + "spans": [ + { + "bbox": [ + 61, + 263, + 296, + 308 + ], + "type": "text", + "content": "[71] Yichun Shi, Peng Wang, Jianglong Ye, Long Mai, Kejie Li, and Xiao Yang. MVDream: Multi-view diffusion for 3d generation. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 308, + 296, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 308, + 296, + 374 + ], + "spans": [ + { + "bbox": [ + 61, + 308, + 296, + 374 + ], + "type": "text", + "content": "[72] Yawar Siddiqui, Antonio Alliegro, Alexey Artemov, Tatiana Tommasi, Daniele Sirigatti, Vladislav Rosov, Angela Dai, and Matthias Nießner. Meshgpt: Generating triangle meshes with decoder-only transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19615-19625, 2024. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 376, + 296, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 376, + 296, + 441 + ], + "spans": [ + { + "bbox": [ + 61, + 376, + 296, + 441 + ], + "type": "text", + "content": "[73] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, Devi Parikh, Sonal Gupta, and Yaniv Taigman. 
Make-a-video: Text-to-video generation without text-video data. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 443, + 296, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 443, + 296, + 507 + ], + "spans": [ + { + "bbox": [ + 61, + 443, + 296, + 507 + ], + "type": "text", + "content": "[74] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhofer. Deepvoxels: Learning persistent 3d feature embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2437-2446, 2019. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 510, + 296, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 510, + 296, + 565 + ], + "spans": [ + { + "bbox": [ + 61, + 510, + 296, + 565 + ], + "type": "text", + "content": "[75] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems (NeurIPS), 34:19313-19325, 2021. 2, 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 567, + 296, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 567, + 296, + 609 + ], + "spans": [ + { + "bbox": [ + 61, + 567, + 296, + 609 + ], + "type": "text", + "content": "[76] Brandon Smart, Chuanxia Zheng, Iro Laina, and Victor Adrian Prisacariu. Splatt3r: Zero-shot gaussian splatting from uncalibrated image pairs. arXiv preprint arXiv:2408.13912, 2024. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 61, + 612, + 296, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 612, + 296, + 634 + ], + "spans": [ + { + "bbox": [ + 61, + 612, + 296, + 634 + ], + "type": "text", + "content": "[77] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models, 2022. 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 635, + 296, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 635, + 296, + 689 + ], + "spans": [ + { + "bbox": [ + 61, + 635, + 296, + 689 + ], + "type": "text", + "content": "[78] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of rgb-d slam systems. 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 573-580, 2012. 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 61, + 691, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 691, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 61, + 691, + 296, + 713 + ], + "type": "text", + "content": "[79] Stanislaw Szymanowicz, Christian Rupprecht, and Andrea Vedaldi. Splatter Image: Ultra-fast single-view 3D recon" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 338, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 338, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 338, + 73, + 553, + 95 + ], + "type": "text", + "content": "struction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 
2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 320, + 97, + 555, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 97, + 555, + 162 + ], + "spans": [ + { + "bbox": [ + 320, + 97, + 555, + 162 + ], + "type": "text", + "content": "[80] Stanislaw Szymanowicz, Eldar Insafutdinov, Chuanxia Zheng, Dylan Campbell, João F. Henriques, Christian Rupprecht, and Andrea Vedaldi. Flash3D: Feed-forward generalisable 3D scene reconstruction from a single image. In Proceedings of the International Conference on 3D Vision (3DV), 2025. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 320, + 163, + 555, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 163, + 555, + 217 + ], + "spans": [ + { + "bbox": [ + 320, + 163, + 555, + 217 + ], + "type": "text", + "content": "[81] Stanislaw Szymanowicz, Jason Y Zhang, Pratul Srinivasan, Ruiqi Gao, Arthur Brussee, Aleksander Holynski, Ricardo Martin-Brualla, Jonathan T Barron, and Philipp Henzler. Bolt3d: Generating 3d scenes in seconds. arXiv preprint arXiv:2503.14445, 2025. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 320, + 220, + 554, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 220, + 554, + 274 + ], + "spans": [ + { + "bbox": [ + 320, + 220, + 554, + 274 + ], + "type": "text", + "content": "[82] Aether Team, Haoyi Zhu, Yifan Wang, Jianjun Zhou, Wenzheng Chang, Yang Zhou, Zizun Li, Junyi Chen, Chunhua Shen, Jiangmiao Pang, and Tong He. Aether: Geometric-aware unified world modeling. arXiv preprint arXiv:2503.18945, 2025. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 276, + 554, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 276, + 554, + 330 + ], + "spans": [ + { + "bbox": [ + 320, + 276, + 554, + 330 + ], + "type": "text", + "content": "[83] Shubham Tulsiani, Tinghui Zhou, Alexei A Efros, and Jitendra Malik. 
Multi-view supervision for single-view reconstruction via differentiable ray consistency. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 2626-2634, 2017. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 320, + 332, + 554, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 332, + 554, + 374 + ], + "spans": [ + { + "bbox": [ + 320, + 332, + 554, + 374 + ], + "type": "text", + "content": "[84] S. Umeyama. Least-squares estimation of transformation parameters between two point patterns. IEEE Transactions on Pattern Analysis and Machine Intelligence, 13(4):376-380, 1991. 15" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 320, + 376, + 554, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 376, + 554, + 442 + ], + "spans": [ + { + "bbox": [ + 320, + 376, + 554, + 442 + ], + "type": "text", + "content": "[85] Vikram Voleti, Chun-Han Yao, Mark Boss, Adam Letts, David Pankratz, Dmitry Tochilkin, Christian Laforte, Robin Rombach, and Varun Jampani. Sv3d: Novel multi-view synthesis and 3d generation from a single image using latent video diffusion. In European Conference on Computer Vision (ECCV), pages 439-457. Springer, 2024. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 320, + 444, + 554, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 444, + 554, + 476 + ], + "spans": [ + { + "bbox": [ + 320, + 444, + 554, + 476 + ], + "type": "text", + "content": "[86] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. In International Conference on 3D Vision (3DV), 2024. 
2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 478, + 554, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 478, + 554, + 542 + ], + "spans": [ + { + "bbox": [ + 320, + 478, + 554, + 542 + ], + "type": "text", + "content": "[87] Haochen Wang, Xiaodan Du, Jiahao Li, Raymond A Yeh, and Greg Shakhnarovich. Score jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 12619-12629, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 544, + 554, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 544, + 554, + 578 + ], + "spans": [ + { + "bbox": [ + 320, + 544, + 554, + 578 + ], + "type": "text", + "content": "[88] Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 579, + 554, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 579, + 554, + 633 + ], + "spans": [ + { + "bbox": [ + 320, + 579, + 554, + 633 + ], + "type": "text", + "content": "[89] Jianyuan Wang, Minghao Chen, Nikita Karaev, Andrea Vedaldi, Christian Rupprecht, and David Novotny. VGGT: Visual geometry grounded network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 635, + 554, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 635, + 554, + 689 + ], + "spans": [ + { + "bbox": [ + 320, + 635, + 554, + 689 + ], + "type": "text", + "content": "[90] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. 
In Proceedings of the European conference on computer vision (ECCV), pages 52-67, 2018. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 691, + 554, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 691, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 320, + 691, + 554, + 713 + ], + "type": "text", + "content": "[91] Qianqian Wang, Vickie Ye, Hang Gao, Weijia Zeng, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 57, + 73, + 295, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "type": "text", + "content": "motion: 4d reconstruction from a single video. In arXiv preprint arXiv:2407.13764, 2024. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 96, + 295, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 96, + 295, + 151 + ], + "spans": [ + { + "bbox": [ + 61, + 96, + 295, + 151 + ], + "type": "text", + "content": "[92] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. Dust3r: Geometric 3d vision made easy. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20697-20709, 2024. 
1, 2, 3, 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 152, + 294, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 152, + 294, + 217 + ], + "spans": [ + { + "bbox": [ + 61, + 152, + 294, + 217 + ], + "type": "text", + "content": "[93] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian A. Scherer. Tartanair: A dataset to push the limits of visual slam. 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 4909-4916, 2020. 6, 15" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 220, + 294, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 220, + 294, + 283 + ], + "spans": [ + { + "bbox": [ + 62, + 220, + 294, + 283 + ], + "type": "text", + "content": "[94] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems (NeurIPS), 36:7594-7611, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 287, + 294, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 287, + 294, + 319 + ], + "spans": [ + { + "bbox": [ + 62, + 287, + 294, + 319 + ], + "type": "text", + "content": "[95] Yiran Wang, Min Shi, Jiaqi Li, Zihao Huang, Zhiguo Cao, Jianming Zhang, Ke Xian, and Guosheng Lin. Neural video depth stabilizer. In ICCV, 2023. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 321, + 294, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 321, + 294, + 375 + ], + "spans": [ + { + "bbox": [ + 62, + 321, + 294, + 375 + ], + "type": "text", + "content": "[96] Zhengyi Wang, Cheng Lu, Yikai Wang, Fan Bao, Chongxuan Li, Hang Su, and Jun Zhu. 
Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. Advances in Neural Information Processing Systems (NeurIPS), 36, 2024. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 377, + 294, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 377, + 294, + 431 + ], + "spans": [ + { + "bbox": [ + 62, + 377, + 294, + 431 + ], + "type": "text", + "content": "[97] Daniel Watson, William Chan, Ricardo Martin Brulla, Jonathan Ho, Andrea Tagliasacchi, and Mohammad Norouzi. Novel view synthesis with diffusion models. In The Eleventh International Conference on Learning Representations (ICLR), 2023. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 434, + 294, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 434, + 294, + 487 + ], + "spans": [ + { + "bbox": [ + 62, + 434, + 294, + 487 + ], + "type": "text", + "content": "[98] Philippe Weinzaepfel, Vincent Leroy, Thomas Lucas, Romain BRÉGIER, Yohann Cabon, Vaibhav ARORA, Leonid Antsfeld, Boris Chidlovskii, Gabriela Csurka, and Jerome Revaud. CroCo: self-supervised pre-training for 3D vision tasks by cross-view completion. In Proc. NeurIPS, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 490, + 294, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 490, + 294, + 533 + ], + "spans": [ + { + "bbox": [ + 62, + 490, + 294, + 533 + ], + "type": "text", + "content": "[99] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4d gaussian splatting for real-time dynamic scene rendering. In CVPR, 2024. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 57, + 535, + 295, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 535, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 57, + 535, + 295, + 590 + ], + "type": "text", + "content": "[100] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 1-10, 2020. 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 591, + 295, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 591, + 295, + 645 + ], + "spans": [ + { + "bbox": [ + 57, + 591, + 295, + 645 + ], + "type": "text", + "content": "[101] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 647, + 295, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 647, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 647, + 295, + 712 + ], + "type": "text", + "content": "[102] Jinbo Xing, Menghan Xia, Yong Zhang, Haoxin Chen, Wangbo Yu, Hanyuan Liu, Gongye Liu, Xintao Wang, Ying Shan, and Tien-Tsin Wong. Dynamiccafter: Animating open-domain images with video diffusion priors. In European Conference on Computer Vision (ECCV), pages 399-417. Springer, 2024. 
1, 3, 4, 6" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "type": "text", + "content": "[103] Tian-Xing Xu, Xiangjun Gao, Wenbo Hu, Xiaoyu Li, Song-Hai Zhang, and Ying Shan. Geometrycrafter: Consistent geometry estimation for open-world videos with diffusion priors, 2025. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 118, + 555, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 118, + 555, + 182 + ], + "spans": [ + { + "bbox": [ + 317, + 118, + 555, + 182 + ], + "type": "text", + "content": "[104] Jianing Yang, Alexander Sax, Kevin J. Liang, Mikael Henaff, Hao Tang, Ang Cao, Joyce Chai, Franziska Meier, and Matt Feiszli. Fast3r: Towards 3d reconstruction of " + }, + { + "bbox": [ + 317, + 118, + 555, + 182 + ], + "type": "inline_equation", + "content": "1000+" + }, + { + "bbox": [ + 317, + 118, + 555, + 182 + ], + "type": "text", + "content": " images in one forward pass. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 184, + 553, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 184, + 553, + 225 + ], + "spans": [ + { + "bbox": [ + 317, + 184, + 553, + 225 + ], + "type": "text", + "content": "[105] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Ji-ashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proc. CVPR, 2024. 
6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 228, + 553, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 228, + 553, + 270 + ], + "spans": [ + { + "bbox": [ + 316, + 228, + 553, + 270 + ], + "type": "text", + "content": "[106] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. Advances in Neural Information Processing Systems (NeurIPS), 2024. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 272, + 553, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 272, + 553, + 335 + ], + "spans": [ + { + "bbox": [ + 317, + 272, + 553, + 335 + ], + "type": "text", + "content": "[107] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20331-20341, 2024. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 338, + 553, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 338, + 553, + 370 + ], + "spans": [ + { + "bbox": [ + 317, + 338, + 553, + 370 + ], + "type": "text", + "content": "[108] David Yifan Yao, Albert J. Zhai, and Shenlong Wang. Uni4d: Unifying visual foundation models for 4d modeling from a single video, 2025. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 372, + 553, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 372, + 553, + 425 + ], + "spans": [ + { + "bbox": [ + 317, + 372, + 553, + 425 + ], + "type": "text", + "content": "[109] Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. 
arXiv preprint arXiv:2409.02048, 2024.3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 426, + 553, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 426, + 553, + 480 + ], + "spans": [ + { + "bbox": [ + 317, + 426, + 553, + 480 + ], + "type": "text", + "content": "[110] Xumin Yu, Yongming Rao, Ziyi Wang, Zuyan Liu, Jiwen Lu, and Jie Zhou. Pointr: Diverse point cloud completion with geometry-aware transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12498-12507, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 482, + 553, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 482, + 553, + 514 + ], + "spans": [ + { + "bbox": [ + 317, + 482, + 553, + 514 + ], + "type": "text", + "content": "[111] Yuheng Yuan, Qiuhong Shen, Xingyi Yang, and Xinchao Wang. 1000+ fps 4d gaussian splatting for dynamic scene rendering, 2025. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 515, + 553, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 515, + 553, + 569 + ], + "spans": [ + { + "bbox": [ + 317, + 515, + 553, + 569 + ], + "type": "text", + "content": "[112] David Junhao Zhang, Jay Zhangjie Wu, Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou. Show-1: Marrying pixel and latent diffusion models for text-to-video generation. International Journal of Computer Vision (IJCV), pages 1-15, 2024. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 571, + 553, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 571, + 553, + 634 + ], + "spans": [ + { + "bbox": [ + 317, + 571, + 553, + 634 + ], + "type": "text", + "content": "[113] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. 
Monst3r: A simple approach for estimating geometry in the presence of motion. In International Conference on Learning Representations (ICLR), 2025. 1, 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 636, + 553, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 636, + 553, + 679 + ], + "spans": [ + { + "bbox": [ + 317, + 636, + 553, + 679 + ], + "type": "text", + "content": "[114] Jason Y Zhang, Amy Lin, Moneish Kumar, Tzu-Hsuan Yang, Deva Ramanan, and Shubham Tulsiani. Cameras as rays: Pose estimation via ray diffusion. In International Conference on Learning Representations (ICLR), 2024. 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 681, + 553, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 681, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 317, + 681, + 553, + 712 + ], + "type": "text", + "content": "[115] Qihang Zhang, Shuangfei Zhai, Miguel Angel Bautista, Kevin Miao, Alexander Toshev, Joshua Susskind, and Jiatao Gu. World-consistent video diffusion with explicit 3d" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 295, + 297 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "type": "text", + "content": "modeling. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 295, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 295, + 140 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 295, + 140 + ], + "type": "text", + "content": "[116] Zhoutong Zhang, Forrester Cole, Zhengqi Li, Michael Rubinstein, Noah Snavely, and William T. Freeman. Structure and motion from casual videos. In European Conference on Computer Vision (ECCV), 2022. 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 141, + 294, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 141, + 294, + 195 + ], + "spans": [ + { + "bbox": [ + 56, + 141, + 294, + 195 + ], + "type": "text", + "content": "[117] Wenliang Zhao, Yongming Rao, Zuyan Liu, Benlin Liu, Jie Zhou, and Jiwen Lu. Unleashing text-to-image diffusion models for visual perception. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5729-5739, 2023. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 197, + 294, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 197, + 294, + 241 + ], + "spans": [ + { + "bbox": [ + 56, + 197, + 294, + 241 + ], + "type": "text", + "content": "[118] Chuanxia Zheng and Andrea Vedaldi. Free3d: Consistent novel view synthesis without 3d representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9720-9731, 2024. 3, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 242, + 294, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 242, + 294, + 297 + ], + "spans": [ + { + "bbox": [ + 56, + 242, + 294, + 297 + ], + "type": "text", + "content": "[119] Yang Zheng, Adam W Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J Guibas. 
Pointodyssey: A large-scale synthetic dataset for long-term point tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 19855-19865, 2023. 6, 15" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 65, + 68, + 545, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 68, + 545, + 110 + ], + "spans": [ + { + "bbox": [ + 65, + 68, + 545, + 110 + ], + "type": "text", + "content": "Geo4D: Leveraging Video Generators for Geometric 4D Scene Reconstruction Supplementary Material" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 124, + 296, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 124, + 296, + 173 + ], + "spans": [ + { + "bbox": [ + 55, + 124, + 296, + 173 + ], + "type": "text", + "content": "In this supplementary material, we provide additional information to supplement our main submission. The code is available here for research purposes: github.com/ jzr99/Geo4D" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 199, + 191, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 199, + 191, + 213 + ], + "spans": [ + { + "bbox": [ + 55, + 199, + 191, + 213 + ], + "type": "text", + "content": "6. Implementation Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 224, + 157, + 237 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 224, + 157, + 237 + ], + "spans": [ + { + "bbox": [ + 55, + 224, + 157, + 237 + ], + "type": "text", + "content": "6.1. 
Training Dataset" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 247, + 296, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 247, + 296, + 378 + ], + "spans": [ + { + "bbox": [ + 55, + 247, + 296, + 378 + ], + "type": "text", + "content": "As shown in Tab. 5, we use five synthetic datasets for training: Spring [50], BEDLAM [2], PointOdyssey [119], TarTanAir [93], and VirtualKitti [6]. Although all datasets are synthetic, we found that some depth pixels are missing in PointOdyssey [119]. To address this, we apply max pooling to inpaint the missing pixels. During training, we sample each dataset according to the ratios in Tab. 5. For each sample, we select 16 frames from the sequence, with the sampling stride randomly chosen from " + }, + { + "bbox": [ + 55, + 247, + 296, + 378 + ], + "type": "inline_equation", + "content": "\\{1,2,3\\}" + }, + { + "bbox": [ + 55, + 247, + 296, + 378 + ], + "type": "text", + "content": " to allow our diffusion model to adapt to input videos with various frame rates." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 402, + 175, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 402, + 175, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 402, + 175, + 415 + ], + "type": "text", + "content": "6.2. Optimization Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": "The overall optimization process is outlined in Algorithm 1. We first predict all three modality maps using our diffusion model for each video clip " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": ". 
The predicted point maps are then roughly aligned based on the overlapping frames using the Umayama algorithm [84]. The camera intrinsic " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{K}^k" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": " is initialized by minimizing the projection error of the point map " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "X^{k,g^k}" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": " in its reference (first) frame " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": " within each window group " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "g^k" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": ". The camera extrinsics are then initialized using the RANSAC PnP algorithm. In the first stage of optimization, the point maps are roughly disentangled into camera pose and depth map. The disparity map is then aligned with the global depth inferred from point maps by solving Eq. (5) from the main paper to obtain the scale and shift parameters. The camera parameters extracted from the predicted ray map are aligned with the global camera trajectory based on the reference (first) frame of each video clip " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": " via Eq. (8) from the main paper. 
After initializing all the alignment learnable parameters, including rotation " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_{*}^{g}" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": ", scale " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{*}^{g}" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": ", and shift " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\beta_{*}^{g}" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": " across different modalities, where " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "* \\in \\{\\mathrm{p},\\mathrm{d},\\mathrm{c}\\}" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": ", we jointly optimize all the learnable parameters by Eq. (10). Specifically, we set the weights for each loss term in Eq. (10) as " + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\alpha_{1} = 1, \\alpha_{2} = 2, \\alpha_{3} = 0.005, \\alpha_{4} = 0.015" + }, + { + "bbox": [ + 55, + 425, + 296, + 713 + ], + "type": "text", + "content": " to roughly equalize the scale of the different losses." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 315, + 123, + 553, + 322 + ], + "blocks": [ + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "lines": [ + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "spans": [ + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": "Algorithm 1 Multi-Modal Alignment Optimization \n1: " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "X^{i,g}, D^{i,g}, r^{i,g} \\gets" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " Predicted by our diffusion model \n2: " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{p}}^{i}, \\lambda_{\\mathrm{p}}^{g}, R_{\\mathrm{p}}^{g}, \\beta_{\\mathrm{p}}^{g} \\gets" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " Initialized by Umayama algorithm \n3: " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "K_{\\mathrm{p}}^{k} \\gets" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " Optimized from " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "X^{k,g^k}" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " \n4: " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{p}}^{i}, o_{\\mathrm{p}}^{i} \\gets" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " Initialized by Ransac PnP from pointmaps " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "X^i" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " \n5: " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{c}}^{i,g}, o_{\\mathrm{c}}^{i,g} \\gets" + 
}, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " Initialized by Eqs. (6) and (7) from raymaps " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "r^{i,g}" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " \n6: repeat \n7: if Iteration = Align start iteration then \n8: " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{d}}^{g}, \\beta_{\\mathrm{d}}^{g} \\gets \\arg \\min \\mathcal{L}_{\\mathrm{d}}" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " (Eq. (5)) \n9: " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{c}}^{g}, \\lambda_{\\mathrm{c}}^{g}, \\beta_{\\mathrm{c}}^{g} \\gets \\arg \\min \\mathcal{L}_{\\mathrm{c}}" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " (Eq. (8)) \n10: else if Iteration < Align start iteration then \n11: " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{p}}^{i}, K_{\\mathrm{p}}^{i}, R_{\\mathrm{p}}^{i}, o_{\\mathrm{p}}^{i}, \\lambda_{\\mathrm{p}}^{g}, R_{\\mathrm{p}}^{g}, \\beta_{\\mathrm{p}}^{g}, \\gets \\arg \\min \\mathcal{L}_{\\mathrm{p}} + \\mathcal{L}_{\\mathrm{s}}" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " \n12: else \n13: " + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{p}}^{i}, K_{\\mathrm{p}}^{i}, R_{\\mathrm{p}}^{i}, o_{\\mathrm{p}}^{i}, \\lambda_{*}^{g}, R_{*}^{g}, \\beta_{*}^{g} \\gets \\arg \\min \\mathcal{L}_{\\mathrm{all}}" + }, + { + "bbox": [ + 315, + 123, + 553, + 322 + ], + "type": "text", + "content": " \n14: end if \n15: until max loop reached" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + 
"sub_type": "algorithm" + }, + { + "type": "table", + "bbox": [ + 315, + 341, + 553, + 407 + ], + "blocks": [ + { + "bbox": [ + 315, + 341, + 553, + 407 + ], + "lines": [ + { + "bbox": [ + 315, + 341, + 553, + 407 + ], + "spans": [ + { + "bbox": [ + 315, + 341, + 553, + 407 + ], + "type": "table", + "html": "
DatasetScene type#Frames#SequencesRatio
PointOdyssey [119]Indoors/Outdoors200K13116.7%
TartanAir [93]Indoors/Outdoors1000K16316.7%
Spring [50]Outdoors6K3716.7%
VirtualKITTI [6]Driving43K32016.7%
BEDLAM [2]Indoors/Outdoors380K10K33.3%
", + "image_path": "865e847eb30e6f45bfa98558af28b890a5b797f29d27a850c9a4f209048b3886.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 315, + 448, + 553, + 525 + ], + "blocks": [ + { + "bbox": [ + 314, + 409, + 553, + 430 + ], + "lines": [ + { + "bbox": [ + 314, + 409, + 553, + 430 + ], + "spans": [ + { + "bbox": [ + 314, + 409, + 553, + 430 + ], + "type": "text", + "content": "Table 5. Details of training datasets. Our method only uses synthetic datasets for training." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 448, + 553, + 525 + ], + "lines": [ + { + "bbox": [ + 315, + 448, + 553, + 525 + ], + "spans": [ + { + "bbox": [ + 315, + 448, + 553, + 525 + ], + "type": "table", + "html": "
StepsVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
10.22170.70.2340.0720.753
50.20573.50.1850.0630.547
100.20773.20.2120.0710.508
250.22072.20.2110.0740.564
", + "image_path": "3a46001fcd7f623e3cf8dd49722f9a3f098ee6bcf7a68c89fbf981cd929c42a0.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 525, + 553, + 548 + ], + "lines": [ + { + "bbox": [ + 313, + 525, + 553, + 548 + ], + "spans": [ + { + "bbox": [ + 313, + 525, + 553, + 548 + ], + "type": "text", + "content": "Table 6. Ablation study for the DDIM sampling steps. on the Sintel [5] dataset." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 314, + 574, + 430, + 587 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 574, + 430, + 587 + ], + "spans": [ + { + "bbox": [ + 314, + 574, + 430, + 587 + ], + "type": "text", + "content": "7. Additional Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 597, + 525, + 610 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 597, + 525, + 610 + ], + "spans": [ + { + "bbox": [ + 313, + 597, + 525, + 610 + ], + "type": "text", + "content": "7.1. Ablating the Number of Denoising Steps" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": "We study the influence of the number of denoising steps during inference. As shown in Tab. 6, the model achieves optimal performance after around 5 steps. Compared to the video generation task, where a larger number of denoising steps usually produces a more detailed generated video, 4D reconstruction is a more deterministic task, which requires fewer steps. Similar phenomena are also observed in [22], which uses a video generator for video depth estimation." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 68, + 558, + 548 + ], + "blocks": [ + { + "bbox": [ + 55, + 68, + 558, + 548 + ], + "lines": [ + { + "bbox": [ + 55, + 68, + 558, + 548 + ], + "spans": [ + { + "bbox": [ + 55, + 68, + 558, + 548 + ], + "type": "image", + "image_path": "235dd588867b0f447d916c89bdee70030354dd2db6dddad26a1dda0b7431f433.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 560, + 555, + 583 + ], + "lines": [ + { + "bbox": [ + 55, + 560, + 555, + 583 + ], + "spans": [ + { + "bbox": [ + 55, + 560, + 555, + 583 + ], + "type": "text", + "content": "Figure 5. Additional qualitative results. Our method generalizes well to various scenes with different 4D objects and performs robustly against different camera and object motions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 56, + 601, + 294, + 652 + ], + "blocks": [ + { + "bbox": [ + 56, + 601, + 294, + 652 + ], + "lines": [ + { + "bbox": [ + 56, + 601, + 294, + 652 + ], + "spans": [ + { + "bbox": [ + 56, + 601, + 294, + 652 + ], + "type": "table", + "html": "
MethodVideo DepthCamera Pose
Abs Rel ↓δ < 1.25 ↑ATE ↓RPE trans ↓RPE rot ↓
w/o fine-tuned0.21272.10.1920.0610.577
w fine-tuned0.20573.50.1850.0630.547
", + "image_path": "a3bd301507763e8287ae60b8ae2ce0a502a3f782186fa27e1fb10da1c54d0b3a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 653, + 295, + 686 + ], + "lines": [ + { + "bbox": [ + 55, + 653, + 295, + 686 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 295, + 686 + ], + "type": "text", + "content": "Table 7. Ablation study for the fine-tuned point map VAE on the Sintel [5] dataset. The fine-tuned point map VAE performs better than the original one." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 314, + 603, + 555, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 603, + 555, + 617 + ], + "spans": [ + { + "bbox": [ + 314, + 603, + 555, + 617 + ], + "type": "text", + "content": "7.2. Ablation Study for Fine-Tuned Point Map VAE" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "type": "text", + "content": "As stated in the main paper, we added an additional branch to predict the uncertainty for our point map VAE and fine-tuned it based on Eq. 3. We perform an ablation study on our fine-tuning strategy. As shown in Tab. 
7, our fine-tuned point map VAE achieves consistently better performance on both video depth estimation and camera pose estimation tasks compared with the original pre-trained image VAE," + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 70, + 295, + 163 + ], + "blocks": [ + { + "bbox": [ + 57, + 70, + 295, + 163 + ], + "lines": [ + { + "bbox": [ + 57, + 70, + 295, + 163 + ], + "spans": [ + { + "bbox": [ + 57, + 70, + 295, + 163 + ], + "type": "image", + "image_path": "8c9b162c93eb332a31fde267e9a0e93ffc3f40492cd2dbd264eb848306b45339.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 171, + 293, + 182 + ], + "lines": [ + { + "bbox": [ + 58, + 171, + 293, + 182 + ], + "spans": [ + { + "bbox": [ + 58, + 171, + 293, + 182 + ], + "type": "text", + "content": "Figure 6. Visualization of different geometric modality maps." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 195, + 295, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 195, + 295, + 218 + ], + "spans": [ + { + "bbox": [ + 55, + 195, + 295, + 218 + ], + "type": "text", + "content": "demonstrating the necessity and effectiveness of our finetuning strategy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 227, + 266, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 227, + 266, + 239 + ], + "spans": [ + { + "bbox": [ + 55, + 227, + 266, + 239 + ], + "type": "text", + "content": "7.3. 
Analysis of Multi-Modal Representation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 243, + 296, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 243, + 296, + 460 + ], + "spans": [ + { + "bbox": [ + 55, + 243, + 296, + 460 + ], + "type": "text", + "content": "Point maps (PMs) and disparity maps (DMs) are complementary. DMs better represent near objects, while PMs are more depth-agnostic (e.g., human vs house in Fig. 6 (b,c)). As in prior work, DMs are affine invariant (which here makes them range-compatible with the pretrained RGB VAE); their scale and shift, needed to recover undistorted geometry, are inferred by matching them to the predicted PMs. Ray maps (RMs) help infer the camera pose when PMs fail to represent points at infinity (such as the sky in Fig. 6 (e)). We observed that PMs tend to be noisier than DMs, so we prioritized modeling the PMs' uncertainty. Per-pixel uncertainty for ray maps are less meaningful given the high degree of correlation between individual rays. During multi-modal alignment, we align global point clouds with DMs in disparity space and with PMs in linear space. This naturally gives more weight to near points, which tend to be estimated well by DMs, and weighs points based on uncertainty with PMs, thus taking advantage of both modalities." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 469, + 138, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 469, + 138, + 481 + ], + "spans": [ + { + "bbox": [ + 55, + 469, + 138, + 481 + ], + "type": "text", + "content": "8. Visualization" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 490, + 296, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 490, + 296, + 537 + ], + "spans": [ + { + "bbox": [ + 55, + 490, + 296, + 537 + ], + "type": "text", + "content": "Figure 5 shows additional visualizations for indoor, outdoor, and driving scenes. 
Although our model is only trained on synthetic datasets, it generalizes to real-world data with diverse objects and motions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 548, + 130, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 548, + 130, + 560 + ], + "spans": [ + { + "bbox": [ + 55, + 548, + 130, + 560 + ], + "type": "text", + "content": "9. Limitations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 568, + 295, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 568, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 55, + 568, + 295, + 712 + ], + "type": "text", + "content": "Although our method performs well and generalizes to a wide range of in-the-wild videos, it can struggle in cases involving significant changes in focal length or extreme camera motion throughout a sequence. This limitation likely stems from the lack of focal length variation in our training data. Incorporating more sequences with diverse camera movements and zooming effects could help mitigate this issue. Moreover, due to the inherent temporal attention mechanism in our network architecture, our approach currently supports only monocular video input. Extending the method to handle multi-view images or videos is a promising direction for future work." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_content_list.json b/data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a85d408e909f2c8a32d6273f7599d804334f94de --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_content_list.json @@ -0,0 +1,1822 @@ +[ + { + "type": "text", + "text": "PixelFlow: Pixel-Space Generative Models with Flow", + "text_level": 1, + "bbox": [ + 230, + 130, + 767, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shoufa Chen1 Chongjian Ge1,2 Shilong Zhang1 Peize Sun1 Ping Luo1 \n1The University of Hong Kong 2Adobe", + "bbox": [ + 181, + 179, + 810, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 253, + 326, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present PixelFlow, a family of image generation models that operate directly in the raw pixel space, in contrast to the predominant latent-space models. This approach simplifies the image generation process by eliminating the need for a pre-trained Variational Autoencoder (VAE) and enabling the whole model end-to-end trainable. Through efficient cascade flow modeling, PixelFlow achieves affordable computation cost in pixel space. It achieves an FID of 1.98 on $256 \\times 256$ ImageNet class-conditional image generation benchmark. 
The qualitative text-to-image results demonstrate that PixelFlow excels in image quality, artistry, and semantic control. We hope this new paradigm will inspire and open up new opportunities for next-generation visual generation models. Code and models are available at https://github.com/ShoufaChen/PixelFlow.", + "bbox": [ + 86, + 285, + 483, + 512 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 542, + 222, + 558 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Numquam ponenda est pluralitas sine necessitate.", + "bbox": [ + 122, + 579, + 452, + 594 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "William of Ockham", + "bbox": [ + 323, + 606, + 473, + 619 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Driven by the success of the Stable Diffusion (SD) model series [17, 46, 47, 50], latent diffusion models (LDMs) [50] have emerged as the de facto standard for generative modeling across diverse modalities, spanning image [17, 35, 45], video [7, 8, 23, 66, 69], audio [18, 39], and 3D [57, 67]. As shown in Figure 1 (a), LDMs compress raw data into a compact latent space using pre-trained Variencoders (VAEs). This compression reduces computational demands and facilitates efficient diffusion denoising. Despite their widespread success, LDMs decouple the VAE and diffusion components, hindering joint optimization and complicating holistic diagnosis.", + "bbox": [ + 89, + 643, + 482, + 824 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "An alternative approach is to implement diffusion models in the raw pixel space. While intuitive, this becomes computationally unaffordable for high-resolution images due to the substantial resources required to process per-pixel correlations. 
Considering this, prior research [20, 22, 44,", + "bbox": [ + 89, + 825, + 483, + 901 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1e011bdb3b1072d22c1a3b15e9bc1d155d38ad1bc57863c4acfb76b601896f88.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 252, + 895, + 398 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b214496f8b07f0bbdf963f7d6f115fc68945c207311521cdcfe9bbec49e691f4.jpg", + "image_caption": [ + "(a) Latent-based Diffusion Models (Two stages)", + "(b) Pixel-based Diffusion Models (Two stages)" + ], + "image_footnote": [], + "bbox": [ + 522, + 412, + 895, + 547 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/cade610c4d95320b458b117e3a2d2649793ed3224ae557aee8875f6dd565a9e8.jpg", + "image_caption": [ + "(c) PixelFlow (End-to-end one stage)", + "Figure 1. Comparisons of Design Paradigms between latent-based diffusion models (LDMs), pixel-based diffusion models (PDMs), and PixelFlow: (a) LDMs split training into two separate stages—first independently training off-the-shell VAEs, then training diffusion models on tokens extracted from the pre-trained VAEs; (b) Previous PDMs typically train two separate models: a diffusion model on low-resolution images and an upsampler for high-resolution synthesis; (c) PixelFlow, by contrast, offers an end-to-end solution for pixel-based generation, combining both high efficiency and strong generative performance." 
+ ], + "image_footnote": [], + "bbox": [ + 534, + 564, + 895, + 646 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "51, 52] has typically adopted a cascaded approach: first generating a low-resolution image, then employing additional upsamplers to produce high-quality outputs, with the low", + "bbox": [ + 511, + 854, + 906, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07963v1 [cs.CV] 10 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "resolution image serving as conditioning input, as shown in Figure 1(b). However, these cascaded methods also introduce separate networks for different stages, still limiting the benefits of end-to-end design.", + "bbox": [ + 89, + 90, + 480, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we introduce PixelFlow, a simple but effective end-to-end framework for direct image generation in raw pixel space, without the need of separate networks like VAEs or upsamplers. As illustrated in Figure 1(c), PixelFlow uses a unified set of parameters to model multiscale samples across cascading resolutions via Flow Matching [38, 40]. At early denoising stages, when noise levels are high, PixelFlow operates on lower-resolution samples. As denoising progresses, the resolution gradually increases until it reaches the target resolution in the final stage. 
This progressive strategy avoids performing all denoising steps at full resolution, thereby significantly reducing the overall computational cost of the generation process.", + "bbox": [ + 89, + 159, + 482, + 354 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "During training, the cross-scale samples at different timesteps are constructed by: (1) resizing the images to successive scales and adding Gaussian noise to each scaled image; (2) interpolating between adjacent scale noisy images as model input and conducting velocity prediction. The entire model is trained end-to-end using uniformly sampled training examples from all stages. During inference, the process begins with pure Gaussian noise at the lowest resolution. The model then progressively denoises and upscales the image until the target resolution is reached.", + "bbox": [ + 89, + 362, + 482, + 512 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We evaluated PixelFlow on both class-conditional and text-to-image generation tasks. Compared to established latent-space diffusion models [42, 45, 50], PixelFlow delivers competitive performance. For instance, on the $256 \\times 256$ ImageNet class-conditional generation benchmark, PixelFlow achieves an FID of 1.98. For text-to-image generation, PixelFlow is evaluated on widely-used benchmarks, achieving 0.64 on GenEval [19] and 77.93 on DPG-Bench [26]. 
In addition, qualitative results in Figure 5 and Figure 6 illustrate that PixelFlow has strong visual fidelity and text-image alignment, highlighting the potential of pixel-space generation for future research.", + "bbox": [ + 89, + 521, + 482, + 702 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The contributions of PixelFlow are summarized as in the following three points:", + "bbox": [ + 89, + 709, + 482, + 739 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- By eliminating the need for a pre-trained VAE, we establish an end-to-end trainable image generation model in raw pixel space directly.", + "- Through cascade flow modeling from low resolution to high resolution, our model achieves affordable computation cost in both training and inference.", + "- PixelFlow obtains competitive performance in visual quality, including 1.98 FID on $256 \\times 256$ ImageNet class-conditional image generation benchmark and appealing properties on text-to-image generation." + ], + "bbox": [ + 91, + 746, + 482, + 897 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 513, + 89, + 653, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Latent Space Diffusion/Flow Models. Variational Autoencoders (VAEs) have become a core component in many recent generative models [16, 17, 35, 47, 48, 50, 59, 66], enabling the mapping of visual data from pixel space to a lower-dimensional, perceptually equivalent latent space. This compact representation facilitates more efficient training and inference. However, VAEs often compromise high-frequency details [47], leading to inevitable low-level artifacts in generated outputs. 
Motivated by a desire for algorithmic simplicity and fully end-to-end optimization, we forgo the VAE and operate directly in pixel space.", + "bbox": [ + 511, + 114, + 903, + 282 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Pixel Space Diffusion/Flow Models. Early diffusion models [2, 21, 56] primarily operated directly in pixel space, aiming to capture the distributions images in a single stage. However, this approach proved both challenging and inefficient for high-resolution image generation, leading to the development of cascaded models [20, 22, 30, 52] that generate images through a sequence of stages. These cascaded models typically begin with the generation of a low-resolution image, which is subsequently upscaled by super-resolution models to achieve higher resolutions. However, the diffusion-based super-resolution process often requires starting from pure noise, conditioned on lower-resolution outputs, resulting in a time-consuming and inefficient generation process. Additionally, training these models in isolated stages hinders end-to-end optimization and necessitates carefully designed strategies to ensure the super-resolution stages.", + "bbox": [ + 511, + 305, + 903, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Furthermore, recent advancements in pixel-space generation have introduced innovative architectures. Simple Diffusion [24, 25] proposes a streamlined diffusion framework for high-resolution image synthesis, achieving strong performance on ImageNet through adjustments of model architecture and noise schedules. FractalGen [37] constructs fractal generative models by recursively invoking atomic generative modules, resulting in self-similar architectures that demonstrate strong performance in pixel-by-pixel image generation. 
TarFlow [68] presents a Transformer-based normalizing flow architecture capable of directly modeling and generating pixels.", + "bbox": [ + 511, + 564, + 903, + 744 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. PixelFlow", + "text_level": 1, + "bbox": [ + 513, + 761, + 620, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1. Preliminary: Flow Matching", + "text_level": 1, + "bbox": [ + 511, + 787, + 769, + 803 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The Flow Matching algorithm [1, 38, 40] progressively transforms a sample from a prior distribution, which is typically a standard normal distribution, to the target data distribution. This is accomplished by defining a forward process consisting of a sequence of linear paths that directly connect samples from the prior distribution to corresponding", + "bbox": [ + 511, + 809, + 903, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6ba2736e7f8ebff671362f1038d26b7f985246d726e9b861a937b2791aafbb24.jpg", + "image_caption": [ + "Figure 2. PixelFlow for cascaded image generation from pixel space. We partition the entire generation procedure into series resolution stages. At the beginning of each resolution stage, we upscale the relatively noisy results from the preceding stage and use them as the starting point for the current stage. Consequently, as the resolution enhances, more refined samples can be obtained." + ], + "image_footnote": [], + "bbox": [ + 94, + 88, + 480, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "samples in the target distribution. During training, a training example is constructed by first sampling a target sample $\\mathbf{x}_1$ , drawing noise $\\mathbf{x}_0 \\sim \\mathcal{N}(0, 1)$ from the standard normal distribution, and selecting a timestep $t \\in [0, 1]$ . 
The training example is then defined through a linear interpolation:", + "bbox": [ + 89, + 393, + 483, + 470 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {t} = t \\cdot \\mathbf {x} _ {1} + (1 - t) \\cdot \\mathbf {x} _ {0} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 199, + 484, + 482, + 500 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The model is trained to approximate the velocity defined by an ordinary differential equation (ODE), $\\mathbf{v}_t = \\frac{d\\mathbf{x}_t}{dt}$ , enabling it to effectively guide the transformation from the intermediate sample $\\mathbf{x}_t$ to the real data sample $\\mathbf{x}_1$ .", + "bbox": [ + 89, + 515, + 483, + 575 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A notable advantage of Flow Matching is its ability to interpolate between two arbitrary distributions, not restricted to using only a standard Gaussian as the source domain. Consequently, in image generation tasks, Flow Matching extends beyond noise-to-image scenarios and can be effectively employed for diverse applications such as image-to-image translation.", + "bbox": [ + 89, + 577, + 483, + 683 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Multi-Scale Generation in Pixel Space", + "text_level": 1, + "bbox": [ + 89, + 696, + 419, + 713 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "PixelFlow generates images by progressively increasing their resolution through a multistage denoising process. To enable this, we construct a multi-scale representation of the target image $\\mathbf{x}_1$ by recursively downsampling it by a factor of 2 at each scale. As illustrated in Figure 2, PixelFlow divides the image generation process into $S$ stages. Each stage $s\\in 0,1,\\dots,S - 1$ operates over a time interval defined by the start and end states $(\\mathbf{xt}_0^s,\\mathbf{xt}_1^s)$ . 
In the degenerate case where $S = 1$ , PixelFlow reduces to a standard single-stage flow matching approach for image generation, similar to recent works [17, 42], but crucially operates in pixel space rather than latent space.", + "bbox": [ + 89, + 719, + 483, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For each stage $s$ , we define the starting and ending states as follows:", + "bbox": [ + 511, + 90, + 906, + 119 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\text {S t a r t :} \\quad \\mathbf {x} _ {t _ {0} ^ {s}} = t _ {0} ^ {s} \\cdot \\operatorname {U p} \\left(\\operatorname {D o w n} \\left(\\mathbf {x} _ {1}, 2 ^ {s + 1}\\right)\\right) + \\left(1 - t _ {0} ^ {s}\\right) \\cdot \\epsilon \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 132, + 906, + 150 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\text {E n d}: \\quad \\mathbf {x} _ {t _ {1} ^ {s}} = t _ {1} ^ {s} \\cdot \\operatorname {D o w n} \\left(\\mathbf {x} _ {1}, 2 ^ {s}\\right) + \\left(1 - t _ {1} ^ {s}\\right) \\cdot \\epsilon , \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 531, + 152, + 906, + 170 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\text{Down}(\\cdot)$ and $\\text{Up}(\\cdot)$ denote the downsampling and upsampling operations, respectively. 
Unless otherwise stated, we adopt bilinear interpolation for downsampling and nearest neighbor for upsampling.", + "bbox": [ + 511, + 181, + 905, + 241 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To train the model, we sample intermediate representations by linearly interpolating between the start and end states:", + "bbox": [ + 511, + 242, + 905, + 285 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {t _ {\\tau} ^ {s}} = \\tau \\cdot \\mathbf {x} _ {t _ {1} ^ {s}} + (1 - \\tau) \\cdot \\mathbf {x} _ {t _ {0} ^ {s}}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 297, + 906, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\tau = \\frac{t - t_0^s}{t_1^s - t_0^s}$ is the rescaled timestep [29, 65] within the $s$ -th stage.", + "bbox": [ + 511, + 327, + 905, + 362 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Then our objective is to train a model $\\mu_{\\theta}(\\cdot)$ to predict the velocity $\\mu_{\\theta}(\\mathbf{x}_{t_{\\tau}^{s},\\tau})$ with target as $\\mathbf{v}_t = \\mathbf{x}_{t_1^s} - \\mathbf{x}_{t_0^s}$ . We use the mean squared error (MSE) loss, formally represented as:", + "bbox": [ + 511, + 362, + 905, + 407 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\left. \\mathbb {E} _ {s, t, \\left(\\mathbf {x} _ {t _ {1} ^ {s}}, \\mathbf {x} _ {t _ {1} ^ {s}}\\right)} \\right\\rvert \\left\\| \\mu_ {\\theta} \\left(\\mathbf {x} _ {t _ {\\tau} ^ {s}}, \\tau\\right) - \\mathbf {v} _ {t} \\right\\| ^ {2} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 601, + 417, + 905, + 440 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. 
Model Architecture", + "text_level": 1, + "bbox": [ + 513, + 449, + 700, + 464 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We instantiate $\\mu_{\\theta}(\\cdot)$ using a Transformer-based architecture [62], chosen for its simplicity, scalability, and effectiveness in generative modeling. Specifically, our implementation is based on the standard Diffusion Transformer (DiT) [45], employing XL-scale configurations across all experiments. To better align with the PixelFlow framework, we introduce several modifications, as detailed below.", + "bbox": [ + 511, + 470, + 905, + 575 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Patchify. Following the Vision Transformer (ViT) design [15, 45], the first layer of PixelFlow is a patch embedding layer, which converts the spatial representation of the input image into a 1D sequence of tokens via a linear projection. In contrast to prior latent transformers [17, 42, 45] that operate on VAE-encoded latents, PixelFlow directly tokenizes raw pixel inputs. To support efficient attention across multiple resolutions within a batch, we apply a sequence packing strategy [11], concatenating flattened token sequences of varying lengths—corresponding to different resolutions—along the sequence dimension.", + "bbox": [ + 511, + 595, + 906, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "RoPE. After patchfying, we replace the original sincos positional encoding [45] with RoPE [58] to better handle varying image resolutions. RoPE has shown strong performance in enabling length extrapolation, particularly in large language models. 
To adapt it for 2D image data, we apply 2D-RoPE by independently applying 1D-RoPE to the height and width dimensions, with each dimension occupying half of the hidden state.", + "bbox": [ + 511, + 779, + 908, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ce3d523162e2de6798738a7ada075092aa9b6d6abae010378ff834f3f0628d6c.jpg", + "image_caption": [ + "Figure 3. Visualization of intermediate result of cascaded stages. We extract the intermediate results from each of the four stages for direct visualization. We observed a clear denoising process at various resolution stages." + ], + "image_footnote": [], + "bbox": [ + 98, + 88, + 903, + 224 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Resolution Embedding. Since PixelFlow operates across multiple resolutions using a shared set of model parameters, we introduce an additional resolution embedding to distinguish between resolutions. Specifically, we use the absolute resolution of the feature map after patch embedding as a conditional signal. This signal is encoded using sinusoidal position embedding [62] and added to the timestep embedding before being passed into the model.", + "bbox": [ + 89, + 297, + 483, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Text-to-Image Generation. While class-conditional image generation typically integrates conditioning information through adaptive layer normalization (adaLN)[45], we extend PixelFlow to support text-to-image generation by introducing a cross-attention layer after each self-attention layer within every Transformer block [6, 7]. This design allows the model to effectively align visual features with the textual input at every stage of the generation process. 
Following recent work [8, 59], we adopt the Flan-T5-XL language model [10] to extract rich text embeddings, which serve as conditioning signals throughout the network.", + "bbox": [ + 89, + 441, + 483, + 608 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Training and Inference", + "text_level": 1, + "bbox": [ + 89, + 619, + 305, + 636 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To facilitate efficient training, we uniformly sample training examples from all resolution stages using the interpolation scheme defined in Equation (4). Additionally, we employ the sequence packing technique [11], which enables joint training of scale-variant examples within a single minibatch, improving both efficiency and scalability.", + "bbox": [ + 89, + 642, + 482, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During inference, the generation process begins with pure Gaussian noise at the lowest resolution and progressively transitions to higher resolutions through multiple stages. Within each resolution stage, we apply standard flow-based sampling, using either the Euler discrete sampler [17] or the Dopri5 solver, depending on the desired trade-off between speed and accuracy. To ensure smooth and coherent transitions across scales, we adopt an ronoising strategy [29, 60], which effectively mitigates the jumping point issue [4] often observed in multi-scale generation pipelines.", + "bbox": [ + 89, + 734, + 483, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 511, + 296, + 645, + 313 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we first detail our experimental setup in Sec. 4.1. Subsequently, we analyze key components of our approach, including model design (Sec. 4.2) and inference configurations (Sec. 4.3). Finally, we benchmark PixelFlow against state-of-the-art methods on class- (Sec. 4.4) and text-to-image (Sec. 
4.5) generation tasks.", + "bbox": [ + 511, + 321, + 906, + 412 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 511, + 420, + 702, + 436 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We evaluate PixelFlow for class-conditional image generation on the ImageNet-1K [12] dataset. Unless stated otherwise, we train PixelFlow at $256 \\times 256$ resolution. All models are trained using the AdamW optimizer [32, 41] with a constant learning rate of $1 \\times 10^{-4}$ . Performance is primarily measured by Fréchet Inception Distance (FID) using the standard evaluation toolkit1. We also report Inception Score (IS) [53], sFID [43], and Precision/Recall [33].", + "bbox": [ + 511, + 441, + 905, + 561 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For text-conditional image generation, we progressively train PixelFlow from $256 \\times 256$ up to $1024 \\times 1024$ resolution. We include qualitative comparisons with current start-of-the-art generative models, along with quantitative assessments on popular benchmarks such as T2I-CompBench [27], GenEval [19], and DPG-Bench [26].", + "bbox": [ + 511, + 563, + 905, + 652 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Model Design", + "text_level": 1, + "bbox": [ + 511, + 660, + 656, + 676 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Kickoff sequence length. In principle, PixelFlow can be trained to progressively increase resolution from very low resolution (e.g., $1 \\times 1$ ) up to the target resolution. However, this approach is inefficient in practice, as tokens at extremely low resolutions convey limited meaningful information. Furthermore, allocating excessive timesteps to very short sequences underutilizes the computational capacity of modern GPUs, resulting in decreased model FLOPS utilization. 
Therefore, we explore how varying the resolution at which image generation begins, which we call kickoff image resolution, impacts overall performance.", + "bbox": [ + 511, + 683, + 905, + 848 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For our transformer-based backbone, the number of tokens involved in attention operations is determined by the", + "bbox": [ + 511, + 849, + 905, + 878 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "", + "bbox": [ + 531, + 887, + 870, + 898 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/8beded7c1a3aa975ea063d812dd598403ae35915c9c6a44cc8f67d5cc82ffdac.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
kickoff seq. len.FID ↓sFID ↓IS ↑Precision ↑Recall ↑
32×323.346.1184.750.780.57
8×83.216.2378.500.780.56
2×23.496.4567.810.780.54
", + "bbox": [ + 96, + 89, + 477, + 159 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/e4f49219eea9ec876a4ab556fba046171327dec81549b16dd71d6a4fecb0320f.jpg", + "table_caption": [ + "Table 1. Effect of kickoff sequence length. All models are trained with 600k iterations on ImageNet-1K. Patch size is $2 \\times 2$ and target image resolution is $64 \\times 64$ ." + ], + "table_footnote": [], + "table_body": "
patch sizeFID ↓sFID ↓IS ↑Precision ↑Recall ↑speed†
target res.64×64; kickoff seq.len.2×2; 600K iters
2×23.496.4567.810.780.541.28
4×43.415.5268.830.770.560.58
target res.256×256; kickoff seq.len.2×2; 100K iters
2×228.506.4047.370.580.5330.88
4×433.177.7142.290.570.527.31
8×847.509.6331.190.450.503.96
target res.256×256; kickoff seq.len.2×2; 1600K iters; EMA
4×42.815.48251.790.820.557.31
8×84.655.42195.500.790.543.96
", + "bbox": [ + 96, + 229, + 477, + 431 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "raw image resolution and the patch size. In this experiment, we maintain a consistent patch size of $2 \\times 2$ [45], making the kickoff sequence length directly dependent on the kickoff image resolution. Specifically, we evaluate three kickoff sequence length— $2 \\times 2$ , $8 \\times 8$ , and $32 \\times 32$ while keeping the target resolution fixed at $64 \\times 64$ . Notably, the $32 \\times 32$ setting represents a vanilla pixel-based approach without cascading across resolutions.", + "bbox": [ + 89, + 566, + 482, + 686 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Table 1, among these configurations, the $8 \\times 8$ kickoff sequence length achieves comparable or even slightly improved FID compared to the $32 \\times 32$ baseline. This suggests that initiating generation from an appropriately smaller resolution and progressively scaling up can maintain generation quality while improving computational efficiency by allocating fewer computations to the largest resolution stage. Conversely, reducing the kickoff sequence length further to $2 \\times 2$ results in a performance degradation, likely because tokens at extremely low resolutions provide limited useful information and insufficient guidance for subsequent generation steps. Taking into account both generation quality and computational efficiency, we therefore adopt $8 \\times 8$ as our default kickoff sequence length.", + "bbox": [ + 89, + 688, + 482, + 901 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/7e57ec6108ab75d81d9644abadf0d757a0102e7a9fb3debaeb9465e0877cf73d.jpg", + "table_caption": [ + "Table 2. Effect of patch size. All models have a kickoff sequence length of $2 \\times 2$ . 
Upper: target resolution of $64 \\times 64$ ; Middle: target resolution of $256 \\times 256$ resolution, training with 100K iterations due to computational constraints of patch size $2 \\times 2$ ; Bottom: Extended training to 1600K iterations at $256 \\times 256$ resolution.†Speed measured as number of seconds per sample on a single GPU with a batchsize of 50." + ], + "table_footnote": [], + "table_body": "
stepFID ↓sFID ↓IS ↑Precision ↑Recall ↑
103.395.98255.270.800.54
202.535.53272.130.820.56
302.515.82274.920.820.56
402.556.58272.680.810.56
", + "bbox": [ + 514, + 89, + 906, + 172 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/bc8cb8556707360b013a1190d7d5c509b2a7f2bea7b60c77621658eab126a3e8.jpg", + "table_caption": [ + "(a) Effect of number of steps per stage. CFG is a global constant value 1.50, sample function is Euler." + ], + "table_footnote": [], + "table_body": "
solverFID ↓sFID ↓IS ↑Precision ↑Recall ↑
Euler2.515.82274.920.820.56
Dopri52.435.38282.200.830.56
", + "bbox": [ + 514, + 205, + 906, + 261 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/6cbd33b515a21068b5fcb4146239d04f29be501e2f8283f26fcd92f989d561aa.jpg", + "table_caption": [ + "(b) Effect of sample function. CFG is a global constant value 1.50, the number of steps per stage is 30 in Euler, the absolute tolerance is 1e-6 in Dopri5." + ], + "table_footnote": [], + "table_body": "
cfg schedulecfg max valueFID ↓IS ↑
global constant1.502.43282.2
stage-wise constant2.401.98282.1
", + "bbox": [ + 514, + 305, + 906, + 362 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(c) Effect of classifier-free guidance (CFG) setting. Sample function is Dopri5 with absolute tolerance 1e-6.", + "bbox": [ + 513, + 363, + 906, + 388 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 3. Inference Setting. The best performance is obtained by CFG step-wise constant with maximum value 2.40 and Dopri5 sample function.", + "bbox": [ + 511, + 404, + 905, + 446 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Patch size. Next, we investigate the impact of patch size on model performance while maintaining a kickoff sequence length of $2 \\times 2$ . Initially, we experiment with a target resolution of $64 \\times 64$ and compare two patch sizes— $2 \\times 2$ and $4 \\times 4$ —with results presented in the upper section of Table 2. We observe that PixelFlow achieves very similar performance across these two settings, with the $4 \\times 4$ patch slightly outperforming the $2 \\times 2$ patch on four out of five evaluation metrics. Furthermore, using a patch size of $4 \\times 4$ eliminates the highest-resolution stage required by the $2 \\times 2$ patch size configuration, thus improving efficiency.", + "bbox": [ + 511, + 474, + 906, + 641 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When scaling to a larger target resolution (i.e., $256 \\times 256$ ), employing a patch size of $2 \\times 2$ becomes computationally infeasible due to substantial resource demands, limiting our experiments to only 100K training iterations (middle section of Table 2). This constraint necessitates adopting larger patch sizes. Although increasing the patch size further to $8 \\times 8$ significantly enhances computational efficiency, it leads to a noticeable drop in performance quality. Moreover, this performance gap persists even after extended training (1600K iterations), as shown in the bottom section of Table 2. 
Considering both generation quality and computational cost, we therefore select a patch size of $4 \\times 4$ as our default setting.", + "bbox": [ + 511, + 641, + 908, + 837 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Inference Schedule", + "text_level": 1, + "bbox": [ + 511, + 847, + 696, + 863 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Table 3, we provide a detailed analysis of the inference configuration space, including the number of inference", + "bbox": [ + 511, + 869, + 906, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0e25921f60260761bdf0b391aa1492db1141e06144be12d09082ff2e7b581e9c.jpg", + "image_caption": [ + "Figure 4. Qualitative results of class-conditional image generation of PixelFlow. All images are $256 \\times 256$ resolution." + ], + "image_footnote": [], + "bbox": [ + 91, + 89, + 906, + 482 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "steps at each resolution stage, the choice of ODE solver, and the scheduling of classifier-free guidance (CFG).", + "bbox": [ + 89, + 530, + 482, + 561 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Number of sample steps. In Table 3a, we evaluate the impact of the number of inference steps per resolution stage on generation quality. As the number of steps increases, we observe consistent improvements in FID, sFID, and IS, with the best overall performance achieved at 30 steps. Beyond this point, gains saturate and even slightly decline, indicating diminishing returns.", + "bbox": [ + 89, + 579, + 482, + 685 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A notable advantage of PixelFlow is its flexibility in assigning different numbers of sampling steps to each resolution stage during inference. 
This adaptive configuration allows fine-grained control over the sampling process, enabling performance-efficiency trade-offs. Moving beyond a uniform setting and exploring more granular stage-specific step allocations holds the potential for further performance enhancements.", + "bbox": [ + 89, + 685, + 482, + 805 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ODE Solver. We further investigate the effect of the ODE solver type on generation quality. As shown in Table 3b, we compare the first-order Euler solver with the adaptive higher-order Dormand-Prince (Dopri5) solver [14]. The results indicate that Dopri5 consistently outperforms Euler", + "bbox": [ + 89, + 824, + 482, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "across most evaluation metrics, achieving lower FID and sFID scores, a higher Inception Score, and slightly better precision, while maintaining similar recall. This demonstrates that more accurate and adaptive solvers, such as Dopri5, can better capture the generative dynamics, leading to higher-quality samples—though often with increased computational cost.", + "bbox": [ + 511, + 530, + 906, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "CFG Schedule. Inspired by the recent process [5, 34, 63], we propose a stage-wise CFG schedule, where different stages apply different CFG values, and from the early stage to the later stage, the value increases from 1 to $\\mathrm{CFG}_{\\mathrm{max}}$ . In the condition of 4 stages, we find that 0, 1/6, 2/3 and 1 of the $(\\mathrm{CFG}_{\\mathrm{max}} - 1)$ give the best FID performance. The comparison between global constant CFG and stage-wise CFG is shown in Table 3c, in which we search the best CFG value for each method. Our proposed stage-wise CFG boosts the FID performance from 2.43 to 1.98.", + "bbox": [ + 511, + 657, + 908, + 809 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. 
Comparison on ImageNet Benchmark", + "text_level": 1, + "bbox": [ + 511, + 832, + 841, + 848 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Table 4, we compare PixelFlow with both latent-based and pixel-based image generation models on the ImageNet $256 \\times 256$ benchmark. PixelFlow achieves an FID of 1.98,", + "bbox": [ + 511, + 854, + 906, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 935 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/0e303bdb1b93ae43dcdd2c8a5980195c88032b7943e9c9c23810e68612c366c0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelFID ↓ sFID ↓ IS ↑Precision ↑Recall ↑
Latent Space
LDM-4-G [50]3.60-247.70.870.48
DiT-XL/2 [45]2.274.60278.20.830.57
SiT-XL/2 [42]2.064.49277.50.830.59
Pixel Space
ADM-G [13]4.595.25186.70.820.52
ADM-U [13]3.946.14215.80.830.53
CDM [22]4.88-158.7--
RIN [9, 28]3.42-182.0--
SD, U-ViT-L [24]2.77-211.8--
MDM [20]3.51----
StyleGAN-XL [54]2.304.02265.10.780.53
VDM++ [31]2.12-267.7--
PaGoDA [30]1.56-259.6-0.59
SiD2 [25]1.38----
JetFormer [61]6.64--0.690.56
FractalMAR-H [37]6.15-348.90.810.46
PixelFlow (ours)1.985.83282.10.810.60
", + "bbox": [ + 94, + 88, + 478, + 393 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "representing highly competitive performance relative to state-of-the-art latent-space methods. For instance, it outperforms LDM [50] (FID 3.60), DiT [45] (FID 2.27), and SiT [42] (FID 2.06), while achieving comparable IS and recall scores. These results highlight the effectiveness of our design, suggesting that PixelFlow can serve as a strong prototype for high-quality visual generation systems.", + "bbox": [ + 88, + 474, + 482, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Compared with recent pixel-based models, PixelFlow achieves superior sample quality. It notably outperforms FractalMAR-H [37], and also delivers competitive or better results than strong baselines like ADM-U [13], SiD2 [25], and VDM++ [31].", + "bbox": [ + 89, + 582, + 483, + 657 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We visualize class-conditional image generation of PixelFlow at $256 \\times 256$ resolution in Figure 4. We can observe our model is able to generate images of high visual quality across a wide range of classes.", + "bbox": [ + 89, + 659, + 483, + 720 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Text-to-Image Generation", + "text_level": 1, + "bbox": [ + 89, + 739, + 326, + 757 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Settings. We adopt a two-stage training strategy for text-to-image generation of PixelFlow. First, the model is initialized with an ImageNet-pretrained checkpoint at a resolution of $256 \\times 256$ and trained on a subset of the LAION dataset [55] at the same resolution. In the second stage, we fine-tune the model on a curated set of high-aesthetic-quality images at a higher resolution of $512 \\times 512$ . 
All reported results for PixelFlow are based on this final $512 \\times 512$ resolution model.", + "bbox": [ + 88, + 763, + 482, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/960f1767c92ece40bd94624c230c619ad8e2686c31b28e678b596ac051a2b79b.jpg", + "table_caption": [ + "Table 4. Comparisons on class-conditional image generation on ImageNet $256 \\times 256$ . PixelFlow achieves competitive performance compared with latent space based models." + ], + "table_footnote": [], + "table_body": "
MethodGenEval OverallT2I-CompBenchDPG Bench
ColorShapeTexture
SDv1.5 [50]0.430.37300.36460.421963.18
DALL-E 2 [49]0.520.57500.54640.6374-
SDv2.1 [50]0.500.56940.44950.4982-
SDXL [47]0.550.63690.54080.563774.65
PixArt-α [6]0.480.68860.55820.704471.11
DALL-E 3 [3]0.67†0.8110†0.6750†0.8070†83.50†
GenTron [7]-0.76740.57000.7150-
SD3 [17]0.74----
Transfusion [70]0.63----
LlamaGen [59]0.32----
Emu 3 [64]0.66†0.7913†0.5846†0.7422†80.60
PixelFlow (ours)0.600.75780.45290.600677.93
0.64†0.7689†0.5059†0.6273†
", + "bbox": [ + 514, + 88, + 906, + 339 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Comparison with state-of-the-art models on text-to-image generation benchmarks. We evaluate on GenEval [19], T2I-CompBench [27] and DPG-Bench [26]. We use $\\dagger$ to indicate the result with prompt rewriting.", + "bbox": [ + 511, + 349, + 906, + 406 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To comprehensively evaluate the performance of PixelFlow-T2I in text-to-image generation, we employ three widely recognized benchmarks, each targeting a different facet of compositional understanding: T2I-CompBench [27] assesses alignment between generated images and complex semantic relationships in text. We evaluate three tasks—color, shape, and texture binding—by generating five images per prompt across 300 prompts per sub-task. Alignment is measured using BLIP-VQA[36]; GenEval [19] evaluates compositional aspects such as coherence and spatial arrangement. We generate over 2,000 images from 553 prompts and report the average performance across tasks; DPG-Bench [26] focuses on complex textual descriptions, with 4,000 images generated from 1,065 prompts and results averaged across tasks.", + "bbox": [ + 511, + 433, + 908, + 661 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quantitative results. As shown in Table 5, PixelFlow achieves competitive performance across all benchmarks, demonstrating strong compositional understanding in freeform text-to-image generation. It performs particularly well on T2I-CompBench, with high scores in color and texture binding, and solid results on GenEval (0.64) and DPG-Bench (77.93), surpassing many established models. 
These results underscore PixelFlow as a promising direction for pixel-space image generation conditioned on natural language—showcasing its potential for open-ended, text-driven image synthesis.", + "bbox": [ + 511, + 681, + 908, + 849 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Visualization. We visualize the intermediate results during the sampling process in Figure 3, specifically show", + "bbox": [ + 511, + 869, + 906, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dfa76b1e78167a2685a4cccf48e1fa4f40bc3d0764b6f9c45a96acf621b4f749.jpg", + "image_caption": [ + "A native Warrior shaman Bengal Cat with a black and white leopard pattern, blue eyes, short fur, and portrait pose, colorful feathers and colorful ornaments, a regal oil-style portrait of the queen of native Kitty shaman white Cat with wings and headdress. Nordic is kind and motherly, it has black eye makeup and her hair is in messy." + ], + "image_footnote": [], + "bbox": [ + 101, + 90, + 410, + 329 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/829436484ff51a169826baf14a76a83de72c82cd2d33996b8e380de88ae808a6.jpg", + "image_caption": [ + "1940s vintage colored photo of a well-groomed man, crew cut hair, front view, kodak portrait film" + ], + "image_footnote": [], + "bbox": [ + 98, + 406, + 251, + 523 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/689652bc6b53f934f9785c5165066f8e39102900d667548baef4e2410fb2b10a.jpg", + "image_caption": [ + "A cute 3 year old Chinese girl with a big head and a small body, hair is fluffy and messy tied in a pill head, big eyes, one eye blinking, doe mouth, playful and cute." 
+ ], + "image_footnote": [], + "bbox": [ + 254, + 406, + 383, + 522 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f0a1ba1c09e004b15deafc39cd17fed3708d0f3127aa7ce8ffe40ba17efa03a5.jpg", + "image_caption": [ + "An extremely happy American Cocker Spaniel is smiling and looking up at the camera with his head tilted to one side." + ], + "image_footnote": [], + "bbox": [ + 419, + 89, + 573, + 207 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9ec021b2de0c41610614f63b527621de689b8c074053a770f502bbda417ecf35.jpg", + "image_caption": [ + "Full body portrait of deer by side, visible realistic, with style as a painting in the style by Caravaggio" + ], + "image_footnote": [], + "bbox": [ + 581, + 88, + 733, + 205 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/92fed5a1cf8a7216a94657393f910ae3da6beabb617da9b372b21359660d57ab.jpg", + "image_caption": [ + "Greeting card, party, hyped animal, open mouth, surprised excitement" + ], + "image_footnote": [], + "bbox": [ + 421, + 255, + 573, + 373 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e27a6f4ebd515a8c9e06c383f05bb0450c9b8cc87658670a50d089cdf9c075e3.jpg", + "image_caption": [ + "Super cute clay world, isometric view of Eiffel Tower in Paris, cute clay stop motion animation, people" + ], + "image_footnote": [], + "bbox": [ + 581, + 255, + 733, + 373 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2598aaac23e271a9919f3d68b510ad6796050886ff5e22bb5b556ae898cc8c18.jpg", + "image_caption": [ + "Close-up of an aged man with weathered features and sharp blue eyes peering wisely from beneath a tweed flat cap." 
+ ], + "image_footnote": [], + "bbox": [ + 419, + 406, + 573, + 523 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ae7891b6b48cc6233d1d37686117c09cf7b4ad7ebb7634ee6b21bd31c81de56d.jpg", + "image_caption": [ + "A white bearded man's face emerges from a cloud of white butterflies, background is white", + "Figure 5. Qualitative results of text-conditional generation of PixelFlow. All images are $512 \\times 512$ resolution. Key components of the prompt are highlighted in RED." + ], + "image_footnote": [], + "bbox": [ + 584, + 407, + 730, + 525 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f1a6f6462f120aca1848d9adea07e7e02916b71d19d0e7609f25d0ed0debcfc0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 90, + 895, + 207 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4bf4b044f87e646bd766a7e0041ee5816da7c9dfabdbc88c300b35221f52b162.jpg", + "image_caption": [ + "A digital art piece featuring a splitface portrait of a woman. The left side of face is in a calm, while the right side shows a more intense and red color" + ], + "image_footnote": [], + "bbox": [ + 743, + 256, + 895, + 372 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7e3f68cc81e7cd28b1a3cd02cda899bdfad7c951332adfb75b8904502f1c90ee.jpg", + "image_caption": [ + "A baby cat stands on two legs. facing forward, wearing an Indian classical gloves and shoes.", + "Johannes Vermeer, panda wearing pearl earrings, blue headbands, artwork Girl with a Pearl Earring oil painting," + ], + "image_footnote": [], + "bbox": [ + 741, + 402, + 895, + 521 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ing the final step of each resolution stage. As resolution increases, a clear denoising trend emerges—images become progressively cleaner and less noisy at each stage. Additional generated samples along with their input text prompts are shown in Figure 5 (512×512) and Figure 6 (1024×1024). 
PixelFlow demonstrates high visual fidelity and strong text-image alignment, effectively capturing key visual elements and their relationships from complex prompts. Notably, it generates fine-grained details—such as animal fur, human hair, and hat textures—highlighting its strong attention to detail in pixel space.", + "bbox": [ + 88, + 643, + 482, + 811 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 89, + 828, + 209, + 844 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduce PixelFlow, a novel image generation model that re-think the predominance of latent space based models by directly operating on raw pixel space. By directly", + "bbox": [ + 89, + 854, + 482, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "transforming between different resolution stages, our model exhibits a compelling advantage in simplicity and end-to-end trainability. On both class-conditional image generation and text-to-image generation benchmarks, PixelFlow has been proven to demonstrate competitive image generation capabilities compared to popular latent space-based methods. We hope that this new perspective will inspire future research in visual generation models.", + "bbox": [ + 511, + 643, + 906, + 767 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations Despite its advantages, PixelFlow still faces certain limitations. Although the model avoids full-resolution computation across all stages, the final stage requires full-resolution attention, which accounts for roughly $80\\%$ of the total inference time. Moreover, we observe that training convergence slows as the sequence length increases. 
Addressing these challenges presents opportunities for future improvements in efficiency and scalability.", + "bbox": [ + 511, + 777, + 908, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/382f8d45ddca614bb480fe9f7a8e10e2908775db30a386026a4ade4a60172f9f.jpg", + "image_caption": [], + "image_footnote": [ + "Raspberry in the form of women walk along the path of a fairy tale forest. She carries a jug of water with her. Her head is made of one big raspberry on which she has big and beautiful eyes, as well as nose and mouth." + ], + "bbox": [ + 179, + 88, + 620, + 428 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/33486aad29bbb90aa86465c16413ccddf01d8d1a9adeff17f3b6ec707ed7a7a1.jpg", + "image_caption": [], + "image_footnote": [ + "An embroidered sweater with an anatomical illustration of the human torso and chest, the skin is open to reveal the internal anatomy." + ], + "bbox": [ + 651, + 88, + 800, + 237 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/3d24f25e0022f0473ae02438c61b68f810c8ea75a900967a8e7b99f7b4169784.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 627, + 281, + 820, + 428 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/aac1594d1fe3227964b93b143aa9bd09406790d42aee55a24bcc6a2b4154c6ce.jpg", + "image_caption": [ + "Figure 6. Qualitative samples of PixelFlow. We present the generated images of $1024 \\times 1024$ resolution. Key words are highlighted in RED." 
+ ], + "image_footnote": [ + "Photorealistic, 4k, a micro baby African Buffalo perched on a coffee cup" + ], + "bbox": [ + 178, + 476, + 331, + 595 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/3692d5cc5a780a0a5c914a8de19ce40c80a3d05257bac2c213777c182b8aeeae.jpg", + "image_caption": [], + "image_footnote": [ + "Great Dane Dog sitting on a toilet bowl in wide bathroom, reading a large double page spread newspaper, sit like human. The background is in a white room." + ], + "bbox": [ + 178, + 632, + 333, + 750 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/d180a5889fe2515f2799a5337322a963bbac86597d9a728f402fb0406306f195.jpg", + "image_caption": [], + "image_footnote": [ + "A picture of Joe rogan's head on a cat's body, sitting behind a podcasting microphone." + ], + "bbox": [ + 338, + 478, + 493, + 595 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8e2312a41faf07f10fae91853ba5b4c3c92b1b41f658daedd6637a40fbad39fd.jpg", + "image_caption": [], + "image_footnote": [ + "Full body shot of balenciaga fashion model and parrot hybrid with a human body and the head of the parrot. He is walking through a podium like a model." + ], + "bbox": [ + 338, + 632, + 493, + 750 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/17b84eb273ffb350d431ec171e95947703648198191d356a26eeca493a7eadd9.jpg", + "image_caption": [], + "image_footnote": [ + "Prototype flying fox made from blown glass, Lino Tagliapietra style Muranean glassmaking, intricate details.", + "3D illustration of the chip with text \"AI\" floating above it, with a blue color scheme." 
+ ], + "bbox": [ + 503, + 476, + 656, + 595 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/af011922a940b4656f8bfebb22da807911ec5a80f76519c367ace4b119726fc9.jpg", + "image_caption": [], + "image_footnote": [ + "Sketch sheet of anatomical studies by Leonardo da Vinci Iron man and weapons, show detailed studies of technology and body, use little soft details in red and gold for the armor, mathematic." + ], + "bbox": [ + 503, + 632, + 656, + 750 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/c32148ab97591973dd086d1447f89537f63a1843935aa5912f270e9ed7ced30f.jpg", + "image_caption": [], + "image_footnote": [ + "The world's smallest laughing baby Piggy, perched on someone's finger." + ], + "bbox": [ + 665, + 476, + 820, + 595 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/d595fc36281d0c0c4ea8cd4eb1657f6509f2d51fe7437ccbfade8cdf93ea2175.jpg", + "image_caption": [], + "image_footnote": [ + "Telephoto lens shooting, panoramic view, a white sheep struggling desperately under the sea, with bubbles constantly popping out of its mouth, realistic and lifelike." + ], + "bbox": [ + 665, + 632, + 818, + 751 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 89, + 187, + 104 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Michael Samuel Albergo and Eric Vanden-Eijnden. Building normalizing flows with stochastic interpolants. In The Eleventh International Conference on Learning Representations, 2023. 2", + "[2] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Qinsheng Zhang, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, et al. ediff-i: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 
2", + "[3] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.opennai.com/papers/dall-e-3.pdf, 2(3):8, 2023. 7", + "[4] Andrew Campbell, William Harvey, Christian Dietrich Weilbach, Valentin De Bortoli, Tom Rainforth, and Arnaud Doucet. Trans-dimensional generative modeling via jump diffusion models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 4", + "[5] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11315-11325, 2022. 6", + "[6] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 4, 7", + "[7] Shoufa Chen, Mengmeng Xu, Jiawei Ren, Yuren Cong, Sen He, Yanping Xie, Animesh Sinha, Ping Luo, Tao Xiang, and Juan-Manuel Perez-Rua. Gentron: Diffusion transformers for image and video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6441-6451, 2024. 1, 4, 7", + "[8] Shoufa Chen, Chongjian Ge, Yuqi Zhang, Yida Zhang, Fengda Zhu, Hao Yang, Hongxiang Hao, Hui Wu, Zhichao Lai, Yifei Hu, et al. Goku: Flow based video generative foundation models. arXiv preprint arXiv:2502.04896, 2025.1, 4", + "[9] Ting Chen. On the importance of noise scheduling for diffusion models. arXiv preprint arXiv:2301.10972, 2023. 7", + "[10] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instructionfinetuned language models. Journal of Machine Learning Research, 25(70):1-53, 2024. 
4", + "[11] Mostafa Dehghani, Basil Mustafa, Josip Djolonga, Jonathan Heek, Matthias Minderer, Mathilde Caron, Andreas Steiner, Joan Puigcerver, Robert Geirhos, Ibrahim M Alabdul-mohsin, et al. Patch n'pack: Navit, a vision transformer for any aspect ratio and resolution. Advances in Neural Information Processing Systems, 36, 2024. 3, 4", + "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 4" + ], + "bbox": [ + 93, + 114, + 483, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 7", + "[14] John R Dormand and Peter J Prince. A family of embedded runge-kutta formulae. Journal of computational and applied mathematics, 6(1):19-26, 1980. 6", + "[15] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 3", + "[16] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 2", + "[17] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 1, 2, 3, 4, 7", + "[18] Zach Evans, CJ Carr, Josiah Taylor, Scott H Hawley, and Jordi Pons. Fast timing-conditioned latent audio diffusion. In *Forty-first International Conference on Machine Learning*, 2024. 1", + "[19] Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. 
Geneval: An object-focused framework for evaluating text-to-image alignment. Advances in Neural Information Processing Systems, 36, 2024. 2, 4, 7", + "[20] Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Joshua M Susskind, and Navdeep Jaitly. Matryoshka diffusion models. In The Twelfth International Conference on Learning Representations, 2023. 1, 2, 7", + "[21] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 2", + "[22] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. Journal of Machine Learning Research, 23(47):1-33, 2022. 1, 2, 7", + "[23] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 1", + "[24] Emiel Hoogeboom, Jonathan Heek, and Tim Salimans. simple diffusion: End-to-end diffusion for high resolution images. In International Conference on Machine Learning, pages 13213-13232. PMLR, 2023. 2, 7", + "[25] Emiel Hoogeboom, Thomas Mensink, Jonathan Heek, Kay Lamerigts, Ruiqi Gao, and Tim Salimans. Simpler diffusion (sid2): 1.5 fid on imagenet512 with pixel-space diffusion. arXiv preprint arXiv:2410.19324, 2024. 2, 7", + "[26] Xiwei Hu, Rui Wang, Yixiao Fang, Bin Fu, Pei Cheng, and Gang Yu. Ella: Equip diffusion models with llm for enhanced semantic alignment. arXiv preprint arXiv:2403.05135, 2024. 2, 4, 7", + "[27] Kaiyi Huang, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2i-compbench: A comprehensive benchmark for open-world compositional text-to-image generation. Advances in Neural Information Processing Systems, 36:78723-78747, 2023. 
4, 7" + ], + "bbox": [ + 516, + 92, + 906, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Allan Jabri, David Fleet, and Ting Chen. Scalable adaptive computation for iterative generation. arXiv preprint arXiv:2212.11972, 2022. 7", + "[29] Yang Jin, Zhicheng Sun, Ningyuan Li, Kun Xu, Hao Jiang, Nan Zhuang, Quzhe Huang, Yang Song, Yadong Mu, and Zhouchen Lin. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024.3,4", + "[30] Dongjun Kim, Chieh-Hsin Lai, Wei-Hsiang Liao, Yuhta Takida, Naoki Murata, Toshimitsu Uesaka, Yuki Mitsufuji, and Stefano Ermon. Pagoda: Progressive growing of a one-step generator from a low-resolution diffusion teacher. arXiv preprint arXiv:2405.14822, 2024. 2, 7", + "[31] Diederik Kingma and Ruiqi Gao. Understanding diffusion objectives as the elbo with simple data augmentation. Advances in Neural Information Processing Systems, 36, 2024. 7", + "[32] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2015. 4", + "[33] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 4", + "[34] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 6", + "[35] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.1, 2", + "[36] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. 
In International conference on machine learning, pages 12888-12900. PMLR, 2022. 7", + "[37] Tianhong Li, Qinyi Sun, Lijie Fan, and Kaiming He. Fractal generative models. arXiv preprint arXiv:2502.17437, 2025.2, 7", + "[38] Yaron Lipman, Ricky T. Q. Chen, Heli Ben-Hamu, Maximilian Nickel, and Matthew Le. Flow matching for generative modeling. In The Eleventh International Conference on Learning Representations, 2023. 2", + "[39] Haohe Liu, Zehua Chen, Yi Yuan, Xinhao Mei, Xubo Liu, Danilo Mandic, Wenwu Wang, and Mark D Plumbley. Audioldm: Text-to-audio generation with latent diffusion models. arXiv preprint arXiv:2301.12503, 2023. 1", + "[40] Xingchao Liu, Chengyue Gong, and qiang liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. In The Eleventh International Conference on Learning Representations, 2023. 2", + "[41] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 4", + "[42] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable" + ], + "bbox": [ + 91, + 90, + 485, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "interpolant transformers. arXiv preprint arXiv:2401.08740, 2024. 2, 3, 7", + "[43] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter W Battaglia. Generating images with sparse representations. arXiv preprint arXiv:2103.03841, 2021. 4", + "[44] NVIDIA. Edify image: High-quality image generation with pixel space laplacian diffusion model. arXiv preprint arXiv:2411.07126, 2024. 1", + "[45] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 
1, 2, 3, 4, 5, 7", + "[46] Pablo Pernias, Dominic Rampas, Mats L Richter, Christopher J Pal, and Marc Aubreville. Würstchen: An efficient architecture for large-scale text-to-image diffusion models. arXiv preprint arXiv:2306.00637, 2023. 1", + "[47] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 1, 2, 7", + "[48] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International conference on machine learning, pages 8821-8831. Pmlr, 2021. 2", + "[49] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 7", + "[50] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 2, 7", + "[51] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022. 1", + "[52] Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image superresolution via iterative refinement. IEEE transactions on pattern analysis and machine intelligence, 45(4):4713-4726, 2022. 1, 2", + "[53] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 
4", + "[54] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 conference proceedings, pages 1-10, 2022. 7", + "[55] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems, 35:25278-25294, 2022. 7" + ], + "bbox": [ + 516, + 92, + 906, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[56] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International conference on machine learning, pages 2256-2265. PMLR, 2015. 2", + "[57] Gabriela Ben Melech Stan, Diana Wofk, Scottie Fox, Alex Redden, Will Saxton, Jean Yu, Estelle Aflalo, Shao-Yen Tseng, Fabio Nonato, Matthias Muller, et al. Ldm3d: Latent diffusion model for 3d. arXiv preprint arXiv:2305.10853, 2023. 1", + "[58] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 3", + "[59] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 2, 4, 7", + "[60] Jiayan Teng, Wendi Zheng, Ming Ding, Wenyi Hong, Jianqiao Wangni, Zhuoyi Yang, and Jie Tang. Relay diffusion: Unifying diffusion process across resolutions for image synthesis. arXiv preprint arXiv:2309.03350, 2023. 4", + "[61] Michael Tschannen, André Susano Pinto, and Alexander Kolesnikov. 
Jetformer: An autoregressive generative model of raw images and text. arXiv preprint arXiv:2411.19722, 2024. 7", + "[62] Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Neural Information Processing Systems, 2017. 3, 4", + "[63] Xi Wang, Nicolas Dufour, Nefeli Andreou, Marie-Paule Cani, Victoria Fernández Abrevaya, David Picard, and Vicky Kalogeiton. Analysis of classifier-free guidance weight schedulers. arXiv preprint arXiv:2404.13040, 2024. 6", + "[64] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 7", + "[65] Hanshu Yan, Xingchao Liu, Jiachun Pan, Jun Hao Liew, qiang liu, and Jiashi Feng. PeRFlow: Piecewise rectified flow as universal plug-and-play accelerator. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 3", + "[66] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024. 1, 2", + "[67] Xiaohui Zeng, Arash Vahdat, Francis Williams, Zan Gojcic, Or Litany, Sanja Fidler, and Karsten Kreis. LION: Latent point diffusion models for 3d shape generation. In Advances in Neural Information Processing Systems, 2022. 1", + "[68] Shuangfei Zhai, Ruixiang Zhang, Preetum Nakkiran, David Berthelot, Jiatao Gu, Huangjie Zheng, Tianrong Chen, Miguel Angel Bautista, Navdeep Jaitly, and Josh Susskind. Normalizing flows are capable generative models. arXiv preprint arXiv:2412.06329, 2024. 
2" + ], + "bbox": [ + 91, + 90, + 480, + 900 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[69] Shilong Zhang, Wenbo Li, Shoufa Chen, Chongjian Ge, Peize Sun, Yida Zhang, Yi Jiang, Zehuan Yuan, Binyue Peng, and Ping Luo. Flashvideo: Flowing fidelity to detail for efficient high-resolution video generation. arXiv preprint arXiv:2502.05179, 2025. 1", + "[70] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024. 7" + ], + "bbox": [ + 516, + 90, + 903, + 231 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_model.json b/data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_model.json new file mode 100644 index 0000000000000000000000000000000000000000..89c2d1155380ac9faba6ec475f6b55285357ea30 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_model.json @@ -0,0 +1,2809 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.07963v1 [cs.CV] 10 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.232, + 0.131, + 0.768, + 0.154 + ], + "angle": 0, + "content": "PixelFlow: Pixel-Space Generative Models with Flow" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.18, + 0.812, + 0.221 + ], + "angle": 0, + "content": "Shoufa Chen1 Chongjian Ge1,2 Shilong Zhang1 Peize Sun1 Ping Luo1 \n1The University of Hong Kong 2Adobe" + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.254, + 0.327, + 0.27 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.286, + 
0.485, + 0.513 + ], + "angle": 0, + "content": "We present PixelFlow, a family of image generation models that operate directly in the raw pixel space, in contrast to the predominant latent-space models. This approach simplifies the image generation process by eliminating the need for a pre-trained Variational Autoencoder (VAE) and enabling the whole model end-to-end trainable. Through efficient cascade flow modeling, PixelFlow achieves affordable computation cost in pixel space. It achieves an FID of 1.98 on \\(256 \\times 256\\) ImageNet class-conditional image generation benchmark. The qualitative text-to-image results demonstrate that PixelFlow excels in image quality, artistry, and semantic control. We hope this new paradigm will inspire and open up new opportunities for next-generation visual generation models. Code and models are available at https://github.com/ShoufaChen/PixelFlow." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.543, + 0.223, + 0.559 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.58, + 0.454, + 0.595 + ], + "angle": 0, + "content": "Numquam ponenda est pluralitas sine necessitate." + }, + { + "type": "text", + "bbox": [ + 0.324, + 0.607, + 0.475, + 0.621 + ], + "angle": 0, + "content": "William of Ockham" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.644, + 0.483, + 0.825 + ], + "angle": 0, + "content": "Driven by the success of the Stable Diffusion (SD) model series [17, 46, 47, 50], latent diffusion models (LDMs) [50] have emerged as the de facto standard for generative modeling across diverse modalities, spanning image [17, 35, 45], video [7, 8, 23, 66, 69], audio [18, 39], and 3D [57, 67]. As shown in Figure 1 (a), LDMs compress raw data into a compact latent space using pre-trained Variencoders (VAEs). This compression reduces computational demands and facilitates efficient diffusion denoising. 
Despite their widespread success, LDMs decouple the VAE and diffusion components, hindering joint optimization and complicating holistic diagnosis." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.826, + 0.484, + 0.902 + ], + "angle": 0, + "content": "An alternative approach is to implement diffusion models in the raw pixel space. While intuitive, this becomes computationally unaffordable for high-resolution images due to the substantial resources required to process per-pixel correlations. Considering this, prior research [20, 22, 44," + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.253, + 0.896, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.576, + 0.401, + 0.859, + 0.414 + ], + "angle": 0, + "content": "(a) Latent-based Diffusion Models (Two stages)" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.414, + 0.896, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.573, + 0.548, + 0.845, + 0.561 + ], + "angle": 0, + "content": "(b) Pixel-based Diffusion Models (Two stages)" + }, + { + "type": "image", + "bbox": [ + 0.535, + 0.565, + 0.897, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.596, + 0.657, + 0.825, + 0.671 + ], + "angle": 0, + "content": "(c) PixelFlow (End-to-end one stage)" + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.689, + 0.907, + 0.828 + ], + "angle": 0, + "content": "Figure 1. 
Comparisons of Design Paradigms between latent-based diffusion models (LDMs), pixel-based diffusion models (PDMs), and PixelFlow: (a) LDMs split training into two separate stages—first independently training off-the-shell VAEs, then training diffusion models on tokens extracted from the pre-trained VAEs; (b) Previous PDMs typically train two separate models: a diffusion model on low-resolution images and an upsampler for high-resolution synthesis; (c) PixelFlow, by contrast, offers an end-to-end solution for pixel-based generation, combining both high efficiency and strong generative performance." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.856, + 0.907, + 0.903 + ], + "angle": 0, + "content": "51, 52] has typically adopted a cascaded approach: first generating a low-resolution image, then employing additional upsamplers to produce high-quality outputs, with the low" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.152 + ], + "angle": 0, + "content": "resolution image serving as conditioning input, as shown in Figure 1(b). However, these cascaded methods also introduce separate networks for different stages, still limiting the benefits of end-to-end design." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.16, + 0.483, + 0.356 + ], + "angle": 0, + "content": "In this work, we introduce PixelFlow, a simple but effective end-to-end framework for direct image generation in raw pixel space, without the need of separate networks like VAEs or upsamplers. As illustrated in Figure 1(c), PixelFlow uses a unified set of parameters to model multiscale samples across cascading resolutions via Flow Matching [38, 40]. At early denoising stages, when noise levels are high, PixelFlow operates on lower-resolution samples. 
As denoising progresses, the resolution gradually increases until it reaches the target resolution in the final stage. This progressive strategy avoids performing all denoising steps at full resolution, thereby significantly reducing the overall computational cost of the generation process." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.363, + 0.483, + 0.513 + ], + "angle": 0, + "content": "During training, the cross-scale samples at different timesteps are constructed by: (1) resizing the images to successive scales and adding Gaussian noise to each scaled image; (2) interpolating between adjacent scale noisy images as model input and conducting velocity prediction. The entire model is trained end-to-end using uniformly sampled training examples from all stages. During inference, the process begins with pure Gaussian noise at the lowest resolution. The model then progressively denoises and upscales the image until the target resolution is reached." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.522, + 0.483, + 0.703 + ], + "angle": 0, + "content": "We evaluated PixelFlow on both class-conditional and text-to-image generation tasks. Compared to established latent-space diffusion models [42, 45, 50], PixelFlow delivers competitive performance. For instance, on the \\(256 \\times 256\\) ImageNet class-conditional generation benchmark, PixelFlow achieves an FID of 1.98. For text-to-image generation, PixelFlow is evaluated on widely-used benchmarks, achieving 0.64 on GenEval [19] and 77.93 on DPG-Bench [26]. In addition, qualitative results in Figure 5 and Figure 6 illustrate that PixelFlow has strong visual fidelity and text-image alignment, highlighting the potential of pixel-space generation for future research." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.71, + 0.483, + 0.741 + ], + "angle": 0, + "content": "The contributions of PixelFlow are summarized as in the following three points:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.747, + 0.483, + 0.792 + ], + "angle": 0, + "content": "- By eliminating the need for a pre-trained VAE, we establish an end-to-end trainable image generation model in raw pixel space directly." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.793, + 0.483, + 0.837 + ], + "angle": 0, + "content": "- Through cascade flow modeling from low resolution to high resolution, our model achieves affordable computation cost in both training and inference." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.838, + 0.483, + 0.898 + ], + "angle": 0, + "content": "- PixelFlow obtains competitive performance in visual quality, including 1.98 FID on \\(256 \\times 256\\) ImageNet class-conditional image generation benchmark and appealing properties on text-to-image generation." + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.747, + 0.483, + 0.898 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.09, + 0.655, + 0.106 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.116, + 0.905, + 0.283 + ], + "angle": 0, + "content": "Latent Space Diffusion/Flow Models. Variational Autoencoders (VAEs) have become a core component in many recent generative models [16, 17, 35, 47, 48, 50, 59, 66], enabling the mapping of visual data from pixel space to a lower-dimensional, perceptually equivalent latent space. This compact representation facilitates more efficient training and inference. However, VAEs often compromise high-frequency details [47], leading to inevitable low-level artifacts in generated outputs. Motivated by a desire for algorithmic simplicity and fully end-to-end optimization, we forgo the VAE and operate directly in pixel space." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.306, + 0.905, + 0.563 + ], + "angle": 0, + "content": "Pixel Space Diffusion/Flow Models. Early diffusion models [2, 21, 56] primarily operated directly in pixel space, aiming to capture the distributions images in a single stage. However, this approach proved both challenging and inefficient for high-resolution image generation, leading to the development of cascaded models [20, 22, 30, 52] that generate images through a sequence of stages. These cascaded models typically begin with the generation of a low-resolution image, which is subsequently upscaled by super-resolution models to achieve higher resolutions. However, the diffusion-based super-resolution process often requires starting from pure noise, conditioned on lower-resolution outputs, resulting in a time-consuming and inefficient generation process. Additionally, training these models in isolated stages hinders end-to-end optimization and necessitates carefully designed strategies to ensure the super-resolution stages." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.565, + 0.905, + 0.745 + ], + "angle": 0, + "content": "Furthermore, recent advancements in pixel-space generation have introduced innovative architectures. Simple Diffusion [24, 25] proposes a streamlined diffusion framework for high-resolution image synthesis, achieving strong performance on ImageNet through adjustments of model architecture and noise schedules. FractalGen [37] constructs fractal generative models by recursively invoking atomic generative modules, resulting in self-similar architectures that demonstrate strong performance in pixel-by-pixel image generation. TarFlow [68] presents a Transformer-based normalizing flow architecture capable of directly modeling and generating pixels." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.762, + 0.621, + 0.777 + ], + "angle": 0, + "content": "3. 
PixelFlow" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.788, + 0.771, + 0.804 + ], + "angle": 0, + "content": "3.1. Preliminary: Flow Matching" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.81, + 0.905, + 0.901 + ], + "angle": 0, + "content": "The Flow Matching algorithm [1, 38, 40] progressively transforms a sample from a prior distribution, which is typically a standard normal distribution, to the target data distribution. This is accomplished by defining a forward process consisting of a sequence of linear paths that directly connect samples from the prior distribution to corresponding" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.089, + 0.482, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.27, + 0.485, + 0.354 + ], + "angle": 0, + "content": "Figure 2. PixelFlow for cascaded image generation from pixel space. We partition the entire generation procedure into series resolution stages. At the beginning of each resolution stage, we upscale the relatively noisy results from the preceding stage and use them as the starting point for the current stage. Consequently, as the resolution enhances, more refined samples can be obtained." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.394, + 0.484, + 0.471 + ], + "angle": 0, + "content": "samples in the target distribution. During training, a training example is constructed by first sampling a target sample \\(\\mathbf{x}_1\\), drawing noise \\(\\mathbf{x}_0 \\sim \\mathcal{N}(0, 1)\\) from the standard normal distribution, and selecting a timestep \\(t \\in [0, 1]\\). 
The training example is then defined through a linear interpolation:" + }, + { + "type": "equation", + "bbox": [ + 0.2, + 0.485, + 0.483, + 0.501 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {t} = t \\cdot \\mathbf {x} _ {1} + (1 - t) \\cdot \\mathbf {x} _ {0} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.516, + 0.484, + 0.577 + ], + "angle": 0, + "content": "The model is trained to approximate the velocity defined by an ordinary differential equation (ODE), \\(\\mathbf{v}_t = \\frac{d\\mathbf{x}_t}{dt}\\), enabling it to effectively guide the transformation from the intermediate sample \\(\\mathbf{x}_t\\) to the real data sample \\(\\mathbf{x}_1\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.578, + 0.484, + 0.684 + ], + "angle": 0, + "content": "A notable advantage of Flow Matching is its ability to interpolate between two arbitrary distributions, not restricted to using only a standard Gaussian as the source domain. Consequently, in image generation tasks, Flow Matching extends beyond noise-to-image scenarios and can be effectively employed for diverse applications such as image-to-image translation." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.697, + 0.421, + 0.714 + ], + "angle": 0, + "content": "3.2. Multi-Scale Generation in Pixel Space" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.484, + 0.902 + ], + "angle": 0, + "content": "PixelFlow generates images by progressively increasing their resolution through a multistage denoising process. To enable this, we construct a multi-scale representation of the target image \\(\\mathbf{x}_1\\) by recursively downsampling it by a factor of 2 at each scale. As illustrated in Figure 2, PixelFlow divides the image generation process into \\(S\\) stages. Each stage \\(s\\in 0,1,\\dots,S - 1\\) operates over a time interval defined by the start and end states \\((\\mathbf{xt}_0^s,\\mathbf{xt}_1^s)\\). 
In the degenerate case where \\(S = 1\\), PixelFlow reduces to a standard single-stage flow matching approach for image generation, similar to recent works [17, 42], but crucially operates in pixel space rather than latent space." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.907, + 0.121 + ], + "angle": 0, + "content": "For each stage \\( s \\), we define the starting and ending states as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.133, + 0.907, + 0.151 + ], + "angle": 0, + "content": "\\[\n\\text {S t a r t :} \\quad \\mathbf {x} _ {t _ {0} ^ {s}} = t _ {0} ^ {s} \\cdot \\operatorname {U p} \\left(\\operatorname {D o w n} \\left(\\mathbf {x} _ {1}, 2 ^ {s + 1}\\right)\\right) + \\left(1 - t _ {0} ^ {s}\\right) \\cdot \\epsilon \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.532, + 0.154, + 0.907, + 0.171 + ], + "angle": 0, + "content": "\\[\n\\text {E n d}: \\quad \\mathbf {x} _ {t _ {1} ^ {s}} = t _ {1} ^ {s} \\cdot \\operatorname {D o w n} \\left(\\mathbf {x} _ {1}, 2 ^ {s}\\right) + \\left(1 - t _ {1} ^ {s}\\right) \\cdot \\epsilon , \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.182, + 0.906, + 0.242 + ], + "angle": 0, + "content": "where \\( \\text{Down}(\\cdot) \\) and \\( \\text{Up}(\\cdot) \\) denote the downsampling and upsampling operations, respectively. Unless otherwise stated, we adopt bilinear interpolation for downsampling and nearest neighbor for upsampling." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.243, + 0.906, + 0.286 + ], + "angle": 0, + "content": "To train the model, we sample intermediate representations by linearly interpolating between the start and end states:" + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.299, + 0.907, + 0.318 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {t _ {\\tau} ^ {s}} = \\tau \\cdot \\mathbf {x} _ {t _ {1} ^ {s}} + (1 - \\tau) \\cdot \\mathbf {x} _ {t _ {0} ^ {s}}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.328, + 0.906, + 0.363 + ], + "angle": 0, + "content": "where \\(\\tau = \\frac{t - t_0^s}{t_1^s - t_0^s}\\) is the rescaled timestep [29, 65] within the \\(s\\)-th stage." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.363, + 0.906, + 0.409 + ], + "angle": 0, + "content": "Then our objective is to train a model \\(\\mu_{\\theta}(\\cdot)\\) to predict the velocity \\(\\mu_{\\theta}(\\mathbf{x}_{t_{\\tau}^{s},\\tau})\\) with target as \\(\\mathbf{v}_t = \\mathbf{x}_{t_1^s} - \\mathbf{x}_{t_0^s}\\). We use the mean squared error (MSE) loss, formally represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.602, + 0.418, + 0.906, + 0.441 + ], + "angle": 0, + "content": "\\[\n\\left. \\mathbb {E} _ {s, t, \\left(\\mathbf {x} _ {t _ {1} ^ {s}}, \\mathbf {x} _ {t _ {1} ^ {s}}\\right)} \\right\\rvert \\left\\| \\mu_ {\\theta} \\left(\\mathbf {x} _ {t _ {\\tau} ^ {s}}, \\tau\\right) - \\mathbf {v} _ {t} \\right\\| ^ {2} \\tag {5}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.45, + 0.702, + 0.465 + ], + "angle": 0, + "content": "3.3. Model Architecture" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.472, + 0.906, + 0.577 + ], + "angle": 0, + "content": "We instantiate \\(\\mu_{\\theta}(\\cdot)\\) using a Transformer-based architecture [62], chosen for its simplicity, scalability, and effectiveness in generative modeling. 
Specifically, our implementation is based on the standard Diffusion Transformer (DiT) [45], employing XL-scale configurations across all experiments. To better align with the PixelFlow framework, we introduce several modifications, as detailed below." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.596, + 0.907, + 0.763 + ], + "angle": 0, + "content": "Patchify. Following the Vision Transformer (ViT) design [15, 45], the first layer of PixelFlow is a patch embedding layer, which converts the spatial representation of the input image into a 1D sequence of tokens via a linear projection. In contrast to prior latent transformers [17, 42, 45] that operate on VAE-encoded latents, PixelFlow directly tokenizes raw pixel inputs. To support efficient attention across multiple resolutions within a batch, we apply a sequence packing strategy [11], concatenating flattened token sequences of varying lengths—corresponding to different resolutions—along the sequence dimension." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.909, + 0.902 + ], + "angle": 0, + "content": "RoPE. After patchfying, we replace the original sincos positional encoding [45] with RoPE [58] to better handle varying image resolutions. RoPE has shown strong performance in enabling length extrapolation, particularly in large language models. To adapt it for 2D image data, we apply 2D-RoPE by independently applying 1D-RoPE to the height and width dimensions, with each dimension occupying half of the hidden state." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.089, + 0.905, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.243, + 0.908, + 0.272 + ], + "angle": 0, + "content": "Figure 3. Visualization of intermediate result of cascaded stages. 
We extract the intermediate results from each of the four stages for direct visualization. We observed a clear denoising process at various resolution stages." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.298, + 0.485, + 0.419 + ], + "angle": 0, + "content": "Resolution Embedding. Since PixelFlow operates across multiple resolutions using a shared set of model parameters, we introduce an additional resolution embedding to distinguish between resolutions. Specifically, we use the absolute resolution of the feature map after patch embedding as a conditional signal. This signal is encoded using sinusoidal position embedding [62] and added to the timestep embedding before being passed into the model." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.442, + 0.484, + 0.609 + ], + "angle": 0, + "content": "Text-to-Image Generation. While class-conditional image generation typically integrates conditioning information through adaptive layer normalization (adaLN)[45], we extend PixelFlow to support text-to-image generation by introducing a cross-attention layer after each self-attention layer within every Transformer block [6, 7]. This design allows the model to effectively align visual features with the textual input at every stage of the generation process. Following recent work [8, 59], we adopt the Flan-T5-XL language model [10] to extract rich text embeddings, which serve as conditioning signals throughout the network." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.621, + 0.307, + 0.637 + ], + "angle": 0, + "content": "3.4. Training and Inference" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.643, + 0.483, + 0.734 + ], + "angle": 0, + "content": "To facilitate efficient training, we uniformly sample training examples from all resolution stages using the interpolation scheme defined in Equation (4). 
Additionally, we employ the sequence packing technique [11], which enables joint training of scale-variant examples within a single minibatch, improving both efficiency and scalability." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.735, + 0.484, + 0.902 + ], + "angle": 0, + "content": "During inference, the generation process begins with pure Gaussian noise at the lowest resolution and progressively transitions to higher resolutions through multiple stages. Within each resolution stage, we apply standard flow-based sampling, using either the Euler discrete sampler [17] or the Dopri5 solver, depending on the desired trade-off between speed and accuracy. To ensure smooth and coherent transitions across scales, we adopt an ronoising strategy [29, 60], which effectively mitigates the jumping point issue [4] often observed in multi-scale generation pipelines." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.297, + 0.646, + 0.314 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.322, + 0.907, + 0.413 + ], + "angle": 0, + "content": "In this section, we first detail our experimental setup in Sec. 4.1. Subsequently, we analyze key components of our approach, including model design (Sec. 4.2) and inference configurations (Sec. 4.3). Finally, we benchmark PixelFlow against state-of-the-art methods on class- (Sec. 4.4) and text-to-image (Sec. 4.5) generation tasks." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.421, + 0.704, + 0.437 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.443, + 0.906, + 0.563 + ], + "angle": 0, + "content": "We evaluate PixelFlow for class-conditional image generation on the ImageNet-1K [12] dataset. Unless stated otherwise, we train PixelFlow at \\(256 \\times 256\\) resolution. All models are trained using the AdamW optimizer [32, 41] with a constant learning rate of \\(1 \\times 10^{-4}\\). 
Performance is primarily measured by Fréchet Inception Distance (FID) using the standard evaluation toolkit1. We also report Inception Score (IS) [53], sFID [43], and Precision/Recall [33]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.564, + 0.906, + 0.654 + ], + "angle": 0, + "content": "For text-conditional image generation, we progressively train PixelFlow from \\(256 \\times 256\\) up to \\(1024 \\times 1024\\) resolution. We include qualitative comparisons with current start-of-the-art generative models, along with quantitative assessments on popular benchmarks such as T2I-CompBench [27], GenEval [19], and DPG-Bench [26]." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.661, + 0.657, + 0.677 + ], + "angle": 0, + "content": "4.2. Model Design" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.684, + 0.906, + 0.849 + ], + "angle": 0, + "content": "Kickoff sequence length. In principle, PixelFlow can be trained to progressively increase resolution from very low resolution (e.g., \\(1 \\times 1\\)) up to the target resolution. However, this approach is inefficient in practice, as tokens at extremely low resolutions convey limited meaningful information. Furthermore, allocating excessive timesteps to very short sequences underutilizes the computational capacity of modern GPUs, resulting in decreased model FLOPS utilization. Therefore, we explore how varying the resolution at which image generation begins, which we call kickoff image resolution, impacts overall performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.85, + 0.906, + 0.88 + ], + "angle": 0, + "content": "For our transformer-based backbone, the number of tokens involved in attention operations is determined by the" + }, + { + "type": "page_footnote", + "bbox": [ + 0.532, + 0.888, + 0.871, + 0.9 + ], + "angle": 0, + "content": "" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.098, + 0.09, + 0.478, + 0.16 + ], + "angle": 0, + "content": "
kickoff seq. len.FID ↓sFID ↓IS ↑Precision ↑Recall ↑
32×323.346.1184.750.780.57
8×83.216.2378.500.780.56
2×23.496.4567.810.780.54
" + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.171, + 0.483, + 0.214 + ], + "angle": 0, + "content": "Table 1. Effect of kickoff sequence length. All models are trained with 600k iterations on ImageNet-1K. Patch size is \\(2 \\times 2\\) and target image resolution is \\(64 \\times 64\\)." + }, + { + "type": "table", + "bbox": [ + 0.098, + 0.23, + 0.478, + 0.432 + ], + "angle": 0, + "content": "
patch sizeFID ↓sFID ↓IS ↑Precision ↑Recall ↑speed†
target res.64×64; kickoff seq.len.2×2; 600K iters
2×23.496.4567.810.780.541.28
4×43.415.5268.830.770.560.58
target res.256×256; kickoff seq.len.2×2; 100K iters
2×228.506.4047.370.580.5330.88
4×433.177.7142.290.570.527.31
8×847.509.6331.190.450.503.96
target res.256×256; kickoff seq.len.2×2; 1600K iters; EMA
4×42.815.48251.790.820.557.31
8×84.655.42195.500.790.543.96
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.441, + 0.483, + 0.538 + ], + "angle": 0, + "content": "Table 2. Effect of patch size. All models have a kickoff sequence length of \\(2 \\times 2\\). Upper: target resolution of \\(64 \\times 64\\); Middle: target resolution of \\(256 \\times 256\\) resolution, training with 100K iterations due to computational constraints of patch size \\(2 \\times 2\\); Bottom: Extended training to 1600K iterations at \\(256 \\times 256\\) resolution.†Speed measured as number of seconds per sample on a single GPU with a batchsize of 50." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.568, + 0.483, + 0.688 + ], + "angle": 0, + "content": "raw image resolution and the patch size. In this experiment, we maintain a consistent patch size of \\(2 \\times 2\\) [45], making the kickoff sequence length directly dependent on the kickoff image resolution. Specifically, we evaluate three kickoff sequence length—\\(2 \\times 2\\), \\(8 \\times 8\\), and \\(32 \\times 32\\) while keeping the target resolution fixed at \\(64 \\times 64\\). Notably, the \\(32 \\times 32\\) setting represents a vanilla pixel-based approach without cascading across resolutions." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.689, + 0.483, + 0.902 + ], + "angle": 0, + "content": "As shown in Table 1, among these configurations, the \\(8 \\times 8\\) kickoff sequence length achieves comparable or even slightly improved FID compared to the \\(32 \\times 32\\) baseline. This suggests that initiating generation from an appropriately smaller resolution and progressively scaling up can maintain generation quality while improving computational efficiency by allocating fewer computations to the largest resolution stage. 
Conversely, reducing the kickoff sequence length further to \\(2 \\times 2\\) results in a performance degradation, likely because tokens at extremely low resolutions provide limited useful information and insufficient guidance for subsequent generation steps. Taking into account both generation quality and computational efficiency, we therefore adopt \\(8 \\times 8\\) as our default kickoff sequence length." + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.09, + 0.908, + 0.174 + ], + "angle": 0, + "content": "
stepFID ↓sFID ↓IS ↑Precision ↑Recall ↑
103.395.98255.270.800.54
202.535.53272.130.820.56
302.515.82274.920.820.56
402.556.58272.680.810.56
" + }, + { + "type": "table_caption", + "bbox": [ + 0.515, + 0.176, + 0.908, + 0.201 + ], + "angle": 0, + "content": "(a) Effect of number of steps per stage. CFG is a global constant value 1.50, sample function is Euler." + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.206, + 0.908, + 0.262 + ], + "angle": 0, + "content": "
solverFID ↓sFID ↓IS ↑Precision ↑Recall ↑
Euler2.515.82274.920.820.56
Dopri52.435.38282.200.830.56
" + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.264, + 0.907, + 0.301 + ], + "angle": 0, + "content": "(b) Effect of sample function. CFG is a global constant value 1.50, the number of steps per stage is 30 in Euler, the absolute tolerance is 1e-6 in Dopri5." + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.306, + 0.908, + 0.363 + ], + "angle": 0, + "content": "
cfg schedulecfg max valueFID ↓IS ↑
global constant1.502.43282.2
stage-wise constant2.401.98282.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.364, + 0.907, + 0.39 + ], + "angle": 0, + "content": "(c) Effect of classifier-free guidance (CFG) setting. Sample function is Dopri5 with absolute tolerance 1e-6." + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.405, + 0.906, + 0.448 + ], + "angle": 0, + "content": "Table 3. Inference Setting. The best performance is obtained by CFG step-wise constant with maximum value 2.40 and Dopri5 sample function." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.475, + 0.907, + 0.642 + ], + "angle": 0, + "content": "Patch size. Next, we investigate the impact of patch size on model performance while maintaining a kickoff sequence length of \\(2 \\times 2\\). Initially, we experiment with a target resolution of \\(64 \\times 64\\) and compare two patch sizes—\\(2 \\times 2\\) and \\(4 \\times 4\\)—with results presented in the upper section of Table 2. We observe that PixelFlow achieves very similar performance across these two settings, with the \\(4 \\times 4\\) patch slightly outperforming the \\(2 \\times 2\\) patch on four out of five evaluation metrics. Furthermore, using a patch size of \\(4 \\times 4\\) eliminates the highest-resolution stage required by the \\(2 \\times 2\\) patch size configuration, thus improving efficiency." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.642, + 0.909, + 0.838 + ], + "angle": 0, + "content": "When scaling to a larger target resolution (i.e., \\(256 \\times 256\\)), employing a patch size of \\(2 \\times 2\\) becomes computationally infeasible due to substantial resource demands, limiting our experiments to only 100K training iterations (middle section of Table 2). This constraint necessitates adopting larger patch sizes. Although increasing the patch size further to \\(8 \\times 8\\) significantly enhances computational efficiency, it leads to a noticeable drop in performance quality. 
Moreover, this performance gap persists even after extended training (1600K iterations), as shown in the bottom section of Table 2. Considering both generation quality and computational cost, we therefore select a patch size of \\(4 \\times 4\\) as our default setting." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.848, + 0.697, + 0.864 + ], + "angle": 0, + "content": "4.3. Inference Schedule" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.908, + 0.903 + ], + "angle": 0, + "content": "In Table 3, we provide a detailed analysis of the inference configuration space, including the number of inference" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.093, + 0.09, + 0.907, + 0.483 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.148, + 0.493, + 0.848, + 0.507 + ], + "angle": 0, + "content": "Figure 4. Qualitative results of class-conditional image generation of PixelFlow. All images are \\(256 \\times 256\\) resolution." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.531, + 0.483, + 0.562 + ], + "angle": 0, + "content": "steps at each resolution stage, the choice of ODE solver, and the scheduling of classifier-free guidance (CFG)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.58, + 0.483, + 0.686 + ], + "angle": 0, + "content": "Number of sample steps. In Table 3a, we evaluate the impact of the number of inference steps per resolution stage on generation quality. As the number of steps increases, we observe consistent improvements in FID, sFID, and IS, with the best overall performance achieved at 30 steps. Beyond this point, gains saturate and even slightly decline, indicating diminishing returns." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.686, + 0.483, + 0.806 + ], + "angle": 0, + "content": "A notable advantage of PixelFlow is its flexibility in assigning different numbers of sampling steps to each resolution stage during inference. This adaptive configuration allows fine-grained control over the sampling process, enabling performance-efficiency trade-offs. Moving beyond a uniform setting and exploring more granular stage-specific step allocations holds the potential for further performance enhancements." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.483, + 0.901 + ], + "angle": 0, + "content": "ODE Solver. We further investigate the effect of the ODE solver type on generation quality. As shown in Table 3b, we compare the first-order Euler solver with the adaptive higher-order Dormand-Prince (Dopri5) solver [14]. The results indicate that Dopri5 consistently outperforms Euler" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.531, + 0.907, + 0.637 + ], + "angle": 0, + "content": "across most evaluation metrics, achieving lower FID and sFID scores, a higher Inception Score, and slightly better precision, while maintaining similar recall. This demonstrates that more accurate and adaptive solvers, such as Dopri5, can better capture the generative dynamics, leading to higher-quality samples—though often with increased computational cost." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.658, + 0.909, + 0.81 + ], + "angle": 0, + "content": "CFG Schedule. Inspired by the recent process [5, 34, 63], we propose a stage-wise CFG schedule, where different stages apply different CFG values, and from the early stage to the later stage, the value increases from 1 to \\(\\mathrm{CFG}_{\\mathrm{max}}\\). In the condition of 4 stages, we find that 0, 1/6, 2/3 and 1 of the \\((\\mathrm{CFG}_{\\mathrm{max}} - 1)\\) give the best FID performance. 
The comparison between global constant CFG and stage-wise CFG is shown in Table 3c, in which we search the best CFG value for each method. Our proposed stage-wise CFG boosts the FID performance from 2.43 to 1.98." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.833, + 0.842, + 0.849 + ], + "angle": 0, + "content": "4.4. Comparison on ImageNet Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.908, + 0.901 + ], + "angle": 0, + "content": "In Table 4, we compare PixelFlow with both latent-based and pixel-based image generation models on the ImageNet \\(256 \\times 256\\) benchmark. PixelFlow achieves an FID of 1.98," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.936 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.095, + 0.089, + 0.48, + 0.394 + ], + "angle": 0, + "content": "
ModelFID ↓ sFID ↓ IS ↑Precision ↑Recall ↑
Latent Space
LDM-4-G [50]3.60-247.70.870.48
DiT-XL/2 [45]2.274.60278.20.830.57
SiT-XL/2 [42]2.064.49277.50.830.59
Pixel Space
ADM-G [13]4.595.25186.70.820.52
ADM-U [13]3.946.14215.80.830.53
CDM [22]4.88-158.7--
RIN [9, 28]3.42-182.0--
SD, U-ViT-L [24]2.77-211.8--
MDM [20]3.51----
StyleGAN-XL [54]2.304.02265.10.780.53
VDM++ [31]2.12-267.7--
PaGoDA [30]1.56-259.6-0.59
SiD2 [25]1.38----
JetFormer [61]6.64--0.690.56
FractalMAR-H [37]6.15-348.90.810.46
PixelFlow (ours)1.985.83282.10.810.60
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.404, + 0.483, + 0.448 + ], + "angle": 0, + "content": "Table 4. Comparisons on class-conditional image generation on ImageNet \\(256 \\times 256\\). PixelFlow achieves competitive performance compared with latent space based models." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.476, + 0.483, + 0.582 + ], + "angle": 0, + "content": "representing highly competitive performance relative to state-of-the-art latent-space methods. For instance, it outperforms LDM [50] (FID 3.60), DiT [45] (FID 2.27), and SiT [42] (FID 2.06), while achieving comparable IS and recall scores. These results highlight the effectiveness of our design, suggesting that PixelFlow can serve as a strong prototype for high-quality visual generation systems." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.583, + 0.484, + 0.659 + ], + "angle": 0, + "content": "Compared with recent pixel-based models, PixelFlow achieves superior sample quality. It notably outperforms FractalMAR-H [37], and also delivers competitive or better results than strong baselines like ADM-U [13], SiD2 [25], and VDM++ [31]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.66, + 0.484, + 0.722 + ], + "angle": 0, + "content": "We visualize class-conditional image generation of PixelFlow at \\(256 \\times 256\\) resolution in Figure 4. We can observe our model is able to generate images of high visual quality across a wide range of classes." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.741, + 0.328, + 0.758 + ], + "angle": 0, + "content": "4.5. Text-to-Image Generation" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.765, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Settings. We adopt a two-stage training strategy for text-to-image generation of PixelFlow. First, the model is initialized with an ImageNet-pretrained checkpoint at a resolution of \\(256 \\times 256\\) and trained on a subset of the LAION dataset [55] at the same resolution. 
In the second stage, we fine-tune the model on a curated set of high-aesthetic-quality images at a higher resolution of \\(512 \\times 512\\). All reported results for PixelFlow are based on this final \\(512 \\times 512\\) resolution model." + }, + { + "type": "table", + "bbox": [ + 0.516, + 0.089, + 0.908, + 0.34 + ], + "angle": 0, + "content": "
MethodGenEval OverallT2I-CompBenchDPG Bench
ColorShapeTexture
SDv1.5 [50]0.430.37300.36460.421963.18
DALL-E 2 [49]0.520.57500.54640.6374-
SDv2.1 [50]0.500.56940.44950.4982-
SDXL [47]0.550.63690.54080.563774.65
PixArt-α [6]0.480.68860.55820.704471.11
DALL-E 3 [3]0.67†0.8110†0.6750†0.8070†83.50†
GenTron [7]-0.76740.57000.7150-
SD3 [17]0.74----
Transfusion [70]0.63----
LlamaGen [59]0.32----
Emu 3 [64]0.66†0.7913†0.5846†0.7422†80.60
PixelFlow (ours)0.600.75780.45290.600677.93
0.64†0.7689†0.5059†0.6273†
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.35, + 0.907, + 0.407 + ], + "angle": 0, + "content": "Table 5. Comparison with state-of-the-art models on text-to-image generation benchmarks. We evaluate on GenEval [19], T2I-CompBench [27] and DPG-Bench [26]. We use \\(\\dagger\\) to indicate the result with prompt rewriting." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.434, + 0.909, + 0.662 + ], + "angle": 0, + "content": "To comprehensively evaluate the performance of PixelFlow-T2I in text-to-image generation, we employ three widely recognized benchmarks, each targeting a different facet of compositional understanding: T2I-CompBench [27] assesses alignment between generated images and complex semantic relationships in text. We evaluate three tasks—color, shape, and texture binding—by generating five images per prompt across 300 prompts per sub-task. Alignment is measured using BLIP-VQA[36]; GenEval [19] evaluates compositional aspects such as coherence and spatial arrangement. We generate over 2,000 images from 553 prompts and report the average performance across tasks; DPG-Bench [26] focuses on complex textual descriptions, with 4,000 images generated from 1,065 prompts and results averaged across tasks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.683, + 0.909, + 0.851 + ], + "angle": 0, + "content": "Quantitative results. As shown in Table 5, PixelFlow achieves competitive performance across all benchmarks, demonstrating strong compositional understanding in freeform text-to-image generation. It performs particularly well on T2I-CompBench, with high scores in color and texture binding, and solid results on GenEval (0.64) and DPG-Bench (77.93), surpassing many established models. These results underscore PixelFlow as a promising direction for pixel-space image generation conditioned on natural language—showcasing its potential for open-ended, text-driven image synthesis." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.871, + 0.908, + 0.903 + ], + "angle": 0, + "content": "Visualization. We visualize the intermediate results during the sampling process in Figure 3, specifically show" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.091, + 0.411, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.1, + 0.333, + 0.408, + 0.404 + ], + "angle": 0, + "content": "A native Warrior shaman Bengal Cat with a black and white leopard pattern, blue eyes, short fur, and portrait pose, colorful feathers and colorful ornaments, a regal oil-style portrait of the queen of native Kitty shaman white Cat with wings and headdress. Nordic is kind and motherly, it has black eye makeup and her hair is in messy." + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.407, + 0.252, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.099, + 0.525, + 0.247, + 0.555 + ], + "angle": 0, + "content": "1940s vintage colored photo of a well-groomed man, crew cut hair, front view, kodak portrait film" + }, + { + "type": "image", + "bbox": [ + 0.255, + 0.407, + 0.385, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.254, + 0.525, + 0.407, + 0.575 + ], + "angle": 0, + "content": "A cute 3 year old Chinese girl with a big head and a small body, hair is fluffy and messy tied in a pill head, big eyes, one eye blinking, doe mouth, playful and cute." + }, + { + "type": "image", + "bbox": [ + 0.42, + 0.09, + 0.574, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.423, + 0.208, + 0.574, + 0.247 + ], + "angle": 0, + "content": "An extremely happy American Cocker Spaniel is smiling and looking up at the camera with his head tilted to one side." 
+ }, + { + "type": "image", + "bbox": [ + 0.582, + 0.089, + 0.735, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.582, + 0.207, + 0.734, + 0.237 + ], + "angle": 0, + "content": "Full body portrait of deer by side, visible realistic, with style as a painting in the style by Caravaggio" + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.256, + 0.574, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.421, + 0.375, + 0.568, + 0.404 + ], + "angle": 0, + "content": "Greeting card, party, hyped animal, open mouth, surprised excitement" + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.256, + 0.735, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.585, + 0.375, + 0.735, + 0.405 + ], + "angle": 0, + "content": "Super cute clay world, isometric view of Eiffel Tower in Paris, cute clay stop motion animation, people" + }, + { + "type": "image", + "bbox": [ + 0.42, + 0.407, + 0.574, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.422, + 0.525, + 0.57, + 0.566 + ], + "angle": 0, + "content": "Close-up of an aged man with weathered features and sharp blue eyes peering wisely from beneath a tweed flat cap." + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.409, + 0.732, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.582, + 0.528, + 0.732, + 0.558 + ], + "angle": 0, + "content": "A white bearded man's face emerges from a cloud of white butterflies, background is white" + }, + { + "type": "image", + "bbox": [ + 0.744, + 0.091, + 0.897, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.747, + 0.209, + 0.899, + 0.257 + ], + "angle": 0, + "content": "A digital art piece featuring a splitface portrait of a woman. 
The left side of face is in a calm, while the right side shows a more intense and red color" + }, + { + "type": "image", + "bbox": [ + 0.744, + 0.257, + 0.897, + 0.373 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.746, + 0.374, + 0.893, + 0.403 + ], + "angle": 0, + "content": "A baby cat stands on two legs. facing forward, wearing an Indian classical gloves and shoes." + }, + { + "type": "image", + "bbox": [ + 0.743, + 0.404, + 0.897, + 0.522 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.746, + 0.528, + 0.896, + 0.568 + ], + "angle": 0, + "content": "Johannes Vermeer, panda wearing pearl earrings, blue headbands, artwork Girl with a Pearl Earring oil painting," + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.591, + 0.907, + 0.619 + ], + "angle": 0, + "content": "Figure 5. Qualitative results of text-conditional generation of PixelFlow. All images are \\(512 \\times 512\\) resolution. Key components of the prompt are highlighted in RED." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.645, + 0.483, + 0.812 + ], + "angle": 0, + "content": "ing the final step of each resolution stage. As resolution increases, a clear denoising trend emerges—images become progressively cleaner and less noisy at each stage. Additional generated samples along with their input text prompts are shown in Figure 5 (512×512) and Figure 6 (1024×1024). PixelFlow demonstrates high visual fidelity and strong text-image alignment, effectively capturing key visual elements and their relationships from complex prompts. Notably, it generates fine-grained details—such as animal fur, human hair, and hat textures—highlighting its strong attention to detail in pixel space." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.829, + 0.21, + 0.845 + ], + "angle": 0, + "content": "5. 
Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.483, + 0.903 + ], + "angle": 0, + "content": "We introduce PixelFlow, a novel image generation model that rethinks the predominance of latent space based models by directly operating on raw pixel space. By directly" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.645, + 0.907, + 0.768 + ], + "angle": 0, + "content": "transforming between different resolution stages, our model exhibits a compelling advantage in simplicity and end-to-end trainability. On both class-conditional image generation and text-to-image generation benchmarks, PixelFlow has been proven to demonstrate competitive image generation capabilities compared to popular latent space-based methods. We hope that this new perspective will inspire future research in visual generation models." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.778, + 0.909, + 0.9 + ], + "angle": 0, + "content": "Limitations Despite its advantages, PixelFlow still faces certain limitations. Although the model avoids full-resolution computation across all stages, the final stage requires full-resolution attention, which accounts for roughly \\(80\\%\\) of the total inference time. Moreover, we observe that training convergence slows as the sequence length increases. Addressing these challenges presents opportunities for future improvements in efficiency and scalability." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.18, + 0.089, + 0.621, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.182, + 0.431, + 0.614, + 0.468 + ], + "angle": 0, + "content": "Raspberry in the form of women walk along the path of a fairy tale forest. She carries a jug of water with her. Her head is made of one big raspberry on which she has big and beautiful eyes, as well as nose and mouth." 
+ }, + { + "type": "image", + "bbox": [ + 0.652, + 0.089, + 0.802, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.631, + 0.238, + 0.816, + 0.28 + ], + "angle": 0, + "content": "An embroidered sweater with an anatomical illustration of the human torso and chest, the skin is open to reveal the internal anatomy." + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.282, + 0.821, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.631, + 0.431, + 0.819, + 0.474 + ], + "angle": 0, + "content": "Prototype flying fox made from blown glass, Lino Tagliapietra style Muranean glassmaking, intricate details." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.477, + 0.333, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.18, + 0.599, + 0.327, + 0.632 + ], + "angle": 0, + "content": "Photorealistic, 4k, a micro baby African Buffalo perched on a coffee cup" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.633, + 0.334, + 0.75 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.182, + 0.755, + 0.33, + 0.82 + ], + "angle": 0, + "content": "Great Dane Dog sitting on a toilet bowl in wide bathroom, reading a large double page spread newspaper, sit like human. The background is in a white room." + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.479, + 0.495, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.343, + 0.599, + 0.49, + 0.631 + ], + "angle": 0, + "content": "A picture of Joe rogan's head on a cat's body, sitting behind a podcasting microphone." 
+ }, + { + "type": "image", + "bbox": [ + 0.339, + 0.633, + 0.495, + 0.75 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.343, + 0.755, + 0.49, + 0.82 + ], + "angle": 0, + "content": "Full body shot of balenciaga fashion model and parrot hybrid with a human body and the head of the parrot. He is walking through a podium like a model." + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.477, + 0.658, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.503, + 0.599, + 0.649, + 0.631 + ], + "angle": 0, + "content": "3D illustration of the chip with text \"AI\" floating above it, with a blue color scheme." + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.633, + 0.658, + 0.75 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.507, + 0.754, + 0.654, + 0.831 + ], + "angle": 0, + "content": "Sketch sheet of anatomical studies by Leonardo da Vinci Iron man and weapons, show detailed studies of technology and body, use little soft details in red and gold for the armor, mathematic." + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.477, + 0.821, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.666, + 0.599, + 0.817, + 0.631 + ], + "angle": 0, + "content": "The world's smallest laughing baby Piggy, perched on someone's finger." + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.633, + 0.82, + 0.752 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.666, + 0.755, + 0.813, + 0.82 + ], + "angle": 0, + "content": "Telephoto lens shooting, panoramic view, a white sheep struggling desperately under the sea, with bubbles constantly popping out of its mouth, realistic and lifelike." + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.849, + 0.908, + 0.876 + ], + "angle": 0, + "content": "Figure 6. Qualitative samples of PixelFlow. 
We present the generated images of \\(1024 \\times 1024\\) resolution. Key words are highlighted in RED." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.188, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.115, + 0.484, + 0.17 + ], + "angle": 0, + "content": "[1] Michael Samuel Albergo and Eric Vanden-Eijnden. Building normalizing flows with stochastic interpolants. In The Eleventh International Conference on Learning Representations, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.171, + 0.484, + 0.242 + ], + "angle": 0, + "content": "[2] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Jiaming Song, Qinsheng Zhang, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, et al. ediff-i: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.242, + 0.483, + 0.311 + ], + "angle": 0, + "content": "[3] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2(3):8, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.312, + 0.483, + 0.382 + ], + "angle": 0, + "content": "[4] Andrew Campbell, William Harvey, Christian Dietrich Weilbach, Valentin De Bortoli, Tom Rainforth, and Arnaud Doucet. Trans-dimensional generative modeling via jump diffusion models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.382, + 0.483, + 0.438 + ], + "angle": 0, + "content": "[5] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. 
Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11315-11325, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.439, + 0.483, + 0.507 + ], + "angle": 0, + "content": "[6] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.509, + 0.484, + 0.592 + ], + "angle": 0, + "content": "[7] Shoufa Chen, Mengmeng Xu, Jiawei Ren, Yuren Cong, Sen He, Yanping Xie, Animesh Sinha, Ping Luo, Tao Xiang, and Juan-Manuel Perez-Rua. Gentron: Diffusion transformers for image and video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6441-6451, 2024. 1, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.593, + 0.483, + 0.661 + ], + "angle": 0, + "content": "[8] Shoufa Chen, Chongjian Ge, Yuqi Zhang, Yida Zhang, Fengda Zhu, Hao Yang, Hongxiang Hao, Hui Wu, Zhichao Lai, Yifei Hu, et al. Goku: Flow based video generative foundation models. arXiv preprint arXiv:2502.04896, 2025.1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.663, + 0.483, + 0.69 + ], + "angle": 0, + "content": "[9] Ting Chen. On the importance of noise scheduling for diffusion models. arXiv preprint arXiv:2301.10972, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.692, + 0.483, + 0.76 + ], + "angle": 0, + "content": "[10] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. Journal of Machine Learning Research, 25(70):1-53, 2024. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.761, + 0.483, + 0.844 + ], + "angle": 0, + "content": "[11] Mostafa Dehghani, Basil Mustafa, Josip Djolonga, Jonathan Heek, Matthias Minderer, Mathilde Caron, Andreas Steiner, Joan Puigcerver, Robert Geirhos, Ibrahim M Alabdul-mohsin, et al. Patch n'pack: Navit, a vision transformer for any aspect ratio and resolution. Advances in Neural Information Processing Systems, 36, 2024. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.846, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 4" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.115, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.135 + ], + "angle": 0, + "content": "[13] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.136, + 0.905, + 0.176 + ], + "angle": 0, + "content": "[14] John R Dormand and Peter J Prince. A family of embedded runge-kutta formulae. Journal of computational and applied mathematics, 6(1):19-26, 1980. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.177, + 0.905, + 0.217 + ], + "angle": 0, + "content": "[15] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.218, + 0.905, + 0.274 + ], + "angle": 0, + "content": "[16] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.275, + 0.905, + 0.357 + ], + "angle": 0, + "content": "[17] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 1, 2, 3, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.358, + 0.905, + 0.412 + ], + "angle": 0, + "content": "[18] Zach Evans, CJ Carr, Josiah Taylor, Scott H Hawley, and Jordi Pons. Fast timing-conditioned latent audio diffusion. In *Forty-first International Conference on Machine Learning*, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.413, + 0.905, + 0.469 + ], + "angle": 0, + "content": "[19] Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. Geneval: An object-focused framework for evaluating text-to-image alignment. Advances in Neural Information Processing Systems, 36, 2024. 2, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.47, + 0.905, + 0.524 + ], + "angle": 0, + "content": "[20] Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Joshua M Susskind, and Navdeep Jaitly. Matryoshka diffusion models. In The Twelfth International Conference on Learning Representations, 2023. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.525, + 0.905, + 0.553 + ], + "angle": 0, + "content": "[21] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.554, + 0.907, + 0.608 + ], + "angle": 0, + "content": "[22] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. 
Journal of Machine Learning Research, 23(47):1-33, 2022. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.609, + 0.905, + 0.663 + ], + "angle": 0, + "content": "[23] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.664, + 0.905, + 0.72 + ], + "angle": 0, + "content": "[24] Emiel Hoogeboom, Jonathan Heek, and Tim Salimans. simple diffusion: End-to-end diffusion for high resolution images. In International Conference on Machine Learning, pages 13213-13232. PMLR, 2023. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.721, + 0.905, + 0.775 + ], + "angle": 0, + "content": "[25] Emiel Hoogeboom, Thomas Mensink, Jonathan Heek, Kay Lamerigts, Ruiqi Gao, and Tim Salimans. Simpler diffusion (sid2): 1.5 fid on imagenet512 with pixel-space diffusion. arXiv preprint arXiv:2410.19324, 2024. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.776, + 0.905, + 0.831 + ], + "angle": 0, + "content": "[26] Xiwei Hu, Rui Wang, Yixiao Fang, Bin Fu, Pei Cheng, and Gang Yu. Ella: Equip diffusion models with llm for enhanced semantic alignment. arXiv preprint arXiv:2403.05135, 2024. 2, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.832, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[27] Kaiyi Huang, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2i-compbench: A comprehensive benchmark for open-world compositional text-to-image generation. Advances in Neural Information Processing Systems, 36:78723-78747, 2023. 
4, 7" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.134 + ], + "angle": 0, + "content": "[28] Allan Jabri, David Fleet, and Ting Chen. Scalable adaptive computation for iterative generation. arXiv preprint arXiv:2212.11972, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.136, + 0.483, + 0.205 + ], + "angle": 0, + "content": "[29] Yang Jin, Zhicheng Sun, Ningyuan Li, Kun Xu, Hao Jiang, Nan Zhuang, Quzhe Huang, Yang Song, Yadong Mu, and Zhouchen Lin. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024.3,4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.207, + 0.483, + 0.275 + ], + "angle": 0, + "content": "[30] Dongjun Kim, Chieh-Hsin Lai, Wei-Hsiang Liao, Yuhta Takida, Naoki Murata, Toshimitsu Uesaka, Yuki Mitsufuji, and Stefano Ermon. Pagoda: Progressive growing of a one-step generator from a low-resolution diffusion teacher. arXiv preprint arXiv:2405.14822, 2024. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.277, + 0.483, + 0.33 + ], + "angle": 0, + "content": "[31] Diederik Kingma and Ruiqi Gao. Understanding diffusion objectives as the elbo with simple data augmentation. Advances in Neural Information Processing Systems, 36, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.333, + 0.483, + 0.375 + ], + "angle": 0, + "content": "[32] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2015. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.377, + 0.483, + 0.432 + ], + "angle": 0, + "content": "[33] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. 
Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.434, + 0.483, + 0.502 + ], + "angle": 0, + "content": "[34] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.504, + 0.483, + 0.531 + ], + "angle": 0, + "content": "[35] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.533, + 0.483, + 0.601 + ], + "angle": 0, + "content": "[36] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.603, + 0.483, + 0.644 + ], + "angle": 0, + "content": "[37] Tianhong Li, Qinyi Sun, Lijie Fan, and Kaiming He. Fractal generative models. arXiv preprint arXiv:2502.17437, 2025.2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.646, + 0.483, + 0.702 + ], + "angle": 0, + "content": "[38] Yaron Lipman, Ricky T. Q. Chen, Heli Ben-Hamu, Maximilian Nickel, and Matthew Le. Flow matching for generative modeling. In The Eleventh International Conference on Learning Representations, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.704, + 0.483, + 0.758 + ], + "angle": 0, + "content": "[39] Haohe Liu, Zehua Chen, Yi Yuan, Xinhao Mei, Xubo Liu, Danilo Mandic, Wenwu Wang, and Mark D Plumbley. Audioldm: Text-to-audio generation with latent diffusion models. arXiv preprint arXiv:2301.12503, 2023. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.76, + 0.483, + 0.815 + ], + "angle": 0, + "content": "[40] Xingchao Liu, Chengyue Gong, and qiang liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. In The Eleventh International Conference on Learning Representations, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.817, + 0.483, + 0.858 + ], + "angle": 0, + "content": "[41] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.86, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[42] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.12 + ], + "angle": 0, + "content": "interpolant transformers. arXiv preprint arXiv:2401.08740, 2024. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.121, + 0.905, + 0.162 + ], + "angle": 0, + "content": "[43] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter W Battaglia. Generating images with sparse representations. arXiv preprint arXiv:2103.03841, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.163, + 0.905, + 0.204 + ], + "angle": 0, + "content": "[44] NVIDIA. Edify image: High-quality image generation with pixel space laplacian diffusion model. arXiv preprint arXiv:2411.07126, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.205, + 0.905, + 0.259 + ], + "angle": 0, + "content": "[45] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 
1, 2, 3, 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.261, + 0.905, + 0.316 + ], + "angle": 0, + "content": "[46] Pablo Pernias, Dominic Rampas, Mats L Richter, Christopher J Pal, and Marc Aubreville. Würstchen: An efficient architecture for large-scale text-to-image diffusion models. arXiv preprint arXiv:2306.00637, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.317, + 0.905, + 0.385 + ], + "angle": 0, + "content": "[47] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.386, + 0.905, + 0.442 + ], + "angle": 0, + "content": "[48] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International conference on machine learning, pages 8821-8831. Pmlr, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.443, + 0.905, + 0.496 + ], + "angle": 0, + "content": "[49] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.498, + 0.907, + 0.567 + ], + "angle": 0, + "content": "[50] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 
1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.568, + 0.905, + 0.65 + ], + "angle": 0, + "content": "[51] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.651, + 0.905, + 0.718 + ], + "angle": 0, + "content": "[52] Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image superresolution via iterative refinement. IEEE transactions on pattern analysis and machine intelligence, 45(4):4713-4726, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.72, + 0.905, + 0.775 + ], + "angle": 0, + "content": "[53] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.776, + 0.905, + 0.817 + ], + "angle": 0, + "content": "[54] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 conference proceedings, pages 1-10, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.818, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[55] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems, 35:25278-25294, 2022. 
7" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.16 + ], + "angle": 0, + "content": "[56] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International conference on machine learning, pages 2256-2265. PMLR, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.164, + 0.482, + 0.232 + ], + "angle": 0, + "content": "[57] Gabriela Ben Melech Stan, Diana Wofk, Scottie Fox, Alex Redden, Will Saxton, Jean Yu, Estelle Aflalo, Shao-Yen Tseng, Fabio Nonato, Matthias Muller, et al. Ldm3d: Latent diffusion model for 3d. arXiv preprint arXiv:2305.10853, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.234, + 0.482, + 0.288 + ], + "angle": 0, + "content": "[58] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.292, + 0.482, + 0.346 + ], + "angle": 0, + "content": "[59] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 2, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.349, + 0.482, + 0.403 + ], + "angle": 0, + "content": "[60] Jiayan Teng, Wendi Zheng, Ming Ding, Wenyi Hong, Jianqiao Wangni, Zhuoyi Yang, and Jie Tang. Relay diffusion: Unifying diffusion process across resolutions for image synthesis. arXiv preprint arXiv:2309.03350, 2023. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.406, + 0.482, + 0.459 + ], + "angle": 0, + "content": "[61] Michael Tschannen, André Susano Pinto, and Alexander Kolesnikov. Jetformer: An autoregressive generative model of raw images and text. arXiv preprint arXiv:2411.19722, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.462, + 0.482, + 0.517 + ], + "angle": 0, + "content": "[62] Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Neural Information Processing Systems, 2017. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.52, + 0.482, + 0.574 + ], + "angle": 0, + "content": "[63] Xi Wang, Nicolas Dufour, Nefeli Andreou, Marie-Paule Cani, Victoria Fernández Abrevaya, David Picard, and Vicky Kalogeiton. Analysis of classifier-free guidance weight schedulers. arXiv preprint arXiv:2404.13040, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.577, + 0.482, + 0.631 + ], + "angle": 0, + "content": "[64] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.634, + 0.482, + 0.702 + ], + "angle": 0, + "content": "[65] Hanshu Yan, Xingchao Liu, Jiachun Pan, Jun Hao Liew, qiang liu, and Jiashi Feng. PeRFlow: Piecewise rectified flow as universal plug-and-play accelerator. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.704, + 0.482, + 0.771 + ], + "angle": 0, + "content": "[66] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. Cogvideox: Text-to-video diffusion models with an expert transformer. 
arXiv preprint arXiv:2408.06072, 2024. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.775, + 0.482, + 0.83 + ], + "angle": 0, + "content": "[67] Xiaohui Zeng, Arash Vahdat, Francis Williams, Zan Gojcic, Or Litany, Sanja Fidler, and Karsten Kreis. LION: Latent point diffusion models for 3d shape generation. In Advances in Neural Information Processing Systems, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.832, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[68] Shuangfei Zhai, Ruixiang Zhang, Preetum Nakkiran, David Berthelot, Jiatao Gu, Huangjie Zheng, Tianrong Chen, Miguel Angel Bautista, Navdeep Jaitly, and Josh Susskind. Normalizing flows are capable generative models. arXiv preprint arXiv:2412.06329, 2024. 2" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.16 + ], + "angle": 0, + "content": "[69] Shilong Zhang, Wenbo Li, Shoufa Chen, Chongjian Ge, Peize Sun, Yida Zhang, Yi Jiang, Zehuan Yuan, Binyue Peng, and Ping Luo. Flashvideo: Flowing fidelity to detail for efficient high-resolution video generation. arXiv preprint arXiv:2502.05179, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.164, + 0.905, + 0.232 + ], + "angle": 0, + "content": "[70] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024. 
7" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_origin.pdf b/data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..788c87f48407e6082276789bacaa7f1895442b1f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e2050eb6234057d2dcf361bb11ae76124b7785bd74748499154c6c4e4589bb4 +size 3473559 diff --git a/data/2025/2504_07xxx/2504.07963/full.md b/data/2025/2504_07xxx/2504.07963/full.md new file mode 100644 index 0000000000000000000000000000000000000000..58a15f6959b11aff0b9686fc909258ff5e65b0a8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/full.md @@ -0,0 +1,375 @@ +# PixelFlow: Pixel-Space Generative Models with Flow + +Shoufa Chen1 Chongjian Ge1,2 Shilong Zhang1 Peize Sun1 Ping Luo1 +1The University of Hong Kong 2Adobe + +# Abstract + +We present PixelFlow, a family of image generation models that operate directly in the raw pixel space, in contrast to the predominant latent-space models. This approach simplifies the image generation process by eliminating the need for a pre-trained Variational Autoencoder (VAE) and enabling the whole model end-to-end trainable. Through efficient cascade flow modeling, PixelFlow achieves affordable computation cost in pixel space. It achieves an FID of 1.98 on $256 \times 256$ ImageNet class-conditional image generation benchmark. The qualitative text-to-image results demonstrate that PixelFlow excels in image quality, artistry, and semantic control. 
We hope this new paradigm will inspire and open up new opportunities for next-generation visual generation models. Code and models are available at https://github.com/ShoufaChen/PixelFlow.

# 1. Introduction

Numquam ponenda est pluralitas sine necessitate.

William of Ockham

Driven by the success of the Stable Diffusion (SD) model series [17, 46, 47, 50], latent diffusion models (LDMs) [50] have emerged as the de facto standard for generative modeling across diverse modalities, spanning image [17, 35, 45], video [7, 8, 23, 66, 69], audio [18, 39], and 3D [57, 67]. As shown in Figure 1 (a), LDMs compress raw data into a compact latent space using pre-trained Variational Autoencoders (VAEs). This compression reduces computational demands and facilitates efficient diffusion denoising. Despite their widespread success, LDMs decouple the VAE and diffusion components, hindering joint optimization and complicating holistic diagnosis.

An alternative approach is to implement diffusion models in the raw pixel space. While intuitive, this becomes computationally unaffordable for high-resolution images due to the substantial resources required to process per-pixel correlations. Considering this, prior research [20, 22, 44,

![](images/1e011bdb3b1072d22c1a3b15e9bc1d155d38ad1bc57863c4acfb76b601896f88.jpg)

![](images/b214496f8b07f0bbdf963f7d6f115fc68945c207311521cdcfe9bbec49e691f4.jpg)
(a) Latent-based Diffusion Models (Two stages)
(b) Pixel-based Diffusion Models (Two stages)

![](images/cade610c4d95320b458b117e3a2d2649793ed3224ae557aee8875f6dd565a9e8.jpg)
(c) PixelFlow (End-to-end one stage)
Figure 1. 
Comparisons of Design Paradigms between latent-based diffusion models (LDMs), pixel-based diffusion models (PDMs), and PixelFlow: (a) LDMs split training into two separate stages—first independently training off-the-shell VAEs, then training diffusion models on tokens extracted from the pre-trained VAEs; (b) Previous PDMs typically train two separate models: a diffusion model on low-resolution images and an upsampler for high-resolution synthesis; (c) PixelFlow, by contrast, offers an end-to-end solution for pixel-based generation, combining both high efficiency and strong generative performance. + +51, 52] has typically adopted a cascaded approach: first generating a low-resolution image, then employing additional upsamplers to produce high-quality outputs, with the low + +resolution image serving as conditioning input, as shown in Figure 1(b). However, these cascaded methods also introduce separate networks for different stages, still limiting the benefits of end-to-end design. + +In this work, we introduce PixelFlow, a simple but effective end-to-end framework for direct image generation in raw pixel space, without the need of separate networks like VAEs or upsamplers. As illustrated in Figure 1(c), PixelFlow uses a unified set of parameters to model multiscale samples across cascading resolutions via Flow Matching [38, 40]. At early denoising stages, when noise levels are high, PixelFlow operates on lower-resolution samples. As denoising progresses, the resolution gradually increases until it reaches the target resolution in the final stage. This progressive strategy avoids performing all denoising steps at full resolution, thereby significantly reducing the overall computational cost of the generation process. 
+ +During training, the cross-scale samples at different timesteps are constructed by: (1) resizing the images to successive scales and adding Gaussian noise to each scaled image; (2) interpolating between adjacent scale noisy images as model input and conducting velocity prediction. The entire model is trained end-to-end using uniformly sampled training examples from all stages. During inference, the process begins with pure Gaussian noise at the lowest resolution. The model then progressively denoises and upscales the image until the target resolution is reached. + +We evaluated PixelFlow on both class-conditional and text-to-image generation tasks. Compared to established latent-space diffusion models [42, 45, 50], PixelFlow delivers competitive performance. For instance, on the $256 \times 256$ ImageNet class-conditional generation benchmark, PixelFlow achieves an FID of 1.98. For text-to-image generation, PixelFlow is evaluated on widely-used benchmarks, achieving 0.64 on GenEval [19] and 77.93 on DPG-Bench [26]. In addition, qualitative results in Figure 5 and Figure 6 illustrate that PixelFlow has strong visual fidelity and text-image alignment, highlighting the potential of pixel-space generation for future research. + +The contributions of PixelFlow are summarized as in the following three points: + +- By eliminating the need for a pre-trained VAE, we establish an end-to-end trainable image generation model in raw pixel space directly. +- Through cascade flow modeling from low resolution to high resolution, our model achieves affordable computation cost in both training and inference. +- PixelFlow obtains competitive performance in visual quality, including 1.98 FID on $256 \times 256$ ImageNet class-conditional image generation benchmark and appealing properties on text-to-image generation. + +# 2. Related Work + +Latent Space Diffusion/Flow Models. 
Variational Autoencoders (VAEs) have become a core component in many recent generative models [16, 17, 35, 47, 48, 50, 59, 66], enabling the mapping of visual data from pixel space to a lower-dimensional, perceptually equivalent latent space. This compact representation facilitates more efficient training and inference. However, VAEs often compromise high-frequency details [47], leading to inevitable low-level artifacts in generated outputs. Motivated by a desire for algorithmic simplicity and fully end-to-end optimization, we forgo the VAE and operate directly in pixel space.

Pixel Space Diffusion/Flow Models. Early diffusion models [2, 21, 56] primarily operated directly in pixel space, aiming to capture the distributions of images in a single stage. However, this approach proved both challenging and inefficient for high-resolution image generation, leading to the development of cascaded models [20, 22, 30, 52] that generate images through a sequence of stages. These cascaded models typically begin with the generation of a low-resolution image, which is subsequently upscaled by super-resolution models to achieve higher resolutions. However, the diffusion-based super-resolution process often requires starting from pure noise, conditioned on lower-resolution outputs, resulting in a time-consuming and inefficient generation process. Additionally, training these models in isolated stages hinders end-to-end optimization and necessitates carefully designed strategies to ensure the super-resolution stages.

Furthermore, recent advancements in pixel-space generation have introduced innovative architectures. Simple Diffusion [24, 25] proposes a streamlined diffusion framework for high-resolution image synthesis, achieving strong performance on ImageNet through adjustments of model architecture and noise schedules. 
FractalGen [37] constructs fractal generative models by recursively invoking atomic generative modules, resulting in self-similar architectures that demonstrate strong performance in pixel-by-pixel image generation. TarFlow [68] presents a Transformer-based normalizing flow architecture capable of directly modeling and generating pixels. + +# 3. PixelFlow + +# 3.1. Preliminary: Flow Matching + +The Flow Matching algorithm [1, 38, 40] progressively transforms a sample from a prior distribution, which is typically a standard normal distribution, to the target data distribution. This is accomplished by defining a forward process consisting of a sequence of linear paths that directly connect samples from the prior distribution to corresponding + +![](images/6ba2736e7f8ebff671362f1038d26b7f985246d726e9b861a937b2791aafbb24.jpg) +Figure 2. PixelFlow for cascaded image generation from pixel space. We partition the entire generation procedure into series resolution stages. At the beginning of each resolution stage, we upscale the relatively noisy results from the preceding stage and use them as the starting point for the current stage. Consequently, as the resolution enhances, more refined samples can be obtained. + +samples in the target distribution. During training, a training example is constructed by first sampling a target sample $\mathbf{x}_1$ , drawing noise $\mathbf{x}_0 \sim \mathcal{N}(0, 1)$ from the standard normal distribution, and selecting a timestep $t \in [0, 1]$ . The training example is then defined through a linear interpolation: + +$$ +\mathbf {x} _ {t} = t \cdot \mathbf {x} _ {1} + (1 - t) \cdot \mathbf {x} _ {0} \tag {1} +$$ + +The model is trained to approximate the velocity defined by an ordinary differential equation (ODE), $\mathbf{v}_t = \frac{d\mathbf{x}_t}{dt}$ , enabling it to effectively guide the transformation from the intermediate sample $\mathbf{x}_t$ to the real data sample $\mathbf{x}_1$ . 
+ +A notable advantage of Flow Matching is its ability to interpolate between two arbitrary distributions, not restricted to using only a standard Gaussian as the source domain. Consequently, in image generation tasks, Flow Matching extends beyond noise-to-image scenarios and can be effectively employed for diverse applications such as image-to-image translation. + +# 3.2. Multi-Scale Generation in Pixel Space + +PixelFlow generates images by progressively increasing their resolution through a multistage denoising process. To enable this, we construct a multi-scale representation of the target image $\mathbf{x}_1$ by recursively downsampling it by a factor of 2 at each scale. As illustrated in Figure 2, PixelFlow divides the image generation process into $S$ stages. Each stage $s\in 0,1,\dots,S - 1$ operates over a time interval defined by the start and end states $(\mathbf{xt}_0^s,\mathbf{xt}_1^s)$ . In the degenerate case where $S = 1$ , PixelFlow reduces to a standard single-stage flow matching approach for image generation, similar to recent works [17, 42], but crucially operates in pixel space rather than latent space. + +For each stage $s$ , we define the starting and ending states as follows: + +$$ +\text {S t a r t :} \quad \mathbf {x} _ {t _ {0} ^ {s}} = t _ {0} ^ {s} \cdot \operatorname {U p} \left(\operatorname {D o w n} \left(\mathbf {x} _ {1}, 2 ^ {s + 1}\right)\right) + \left(1 - t _ {0} ^ {s}\right) \cdot \epsilon \tag {2} +$$ + +$$ +\text {E n d}: \quad \mathbf {x} _ {t _ {1} ^ {s}} = t _ {1} ^ {s} \cdot \operatorname {D o w n} \left(\mathbf {x} _ {1}, 2 ^ {s}\right) + \left(1 - t _ {1} ^ {s}\right) \cdot \epsilon , \tag {3} +$$ + +where $\text{Down}(\cdot)$ and $\text{Up}(\cdot)$ denote the downsampling and upsampling operations, respectively. Unless otherwise stated, we adopt bilinear interpolation for downsampling and nearest neighbor for upsampling. 
To train the model, we sample intermediate representations by linearly interpolating between the start and end states:

$$
\mathbf {x} _ {t _ {\tau} ^ {s}} = \tau \cdot \mathbf {x} _ {t _ {1} ^ {s}} + (1 - \tau) \cdot \mathbf {x} _ {t _ {0} ^ {s}}, \tag {4}
$$

where $\tau = \frac{t - t_0^s}{t_1^s - t_0^s}$ is the rescaled timestep [29, 65] within the $s$ -th stage.

Then our objective is to train a model $\mu_{\theta}(\cdot)$ to predict the velocity $\mu_{\theta}(\mathbf{x}_{t_{\tau}^{s}}, \tau)$ with target as $\mathbf{v}_t = \mathbf{x}_{t_1^s} - \mathbf{x}_{t_0^s}$ . We use the mean squared error (MSE) loss, formally represented as:

$$
\mathbb {E} _ {s, t, \left(\mathbf {x} _ {t _ {0} ^ {s}}, \mathbf {x} _ {t _ {1} ^ {s}}\right)} \left[ \left\| \mu_ {\theta} \left(\mathbf {x} _ {t _ {\tau} ^ {s}}, \tau\right) - \mathbf {v} _ {t} \right\| ^ {2} \right] \tag {5}
$$

# 3.3. Model Architecture

We instantiate $\mu_{\theta}(\cdot)$ using a Transformer-based architecture [62], chosen for its simplicity, scalability, and effectiveness in generative modeling. Specifically, our implementation is based on the standard Diffusion Transformer (DiT) [45], employing XL-scale configurations across all experiments. To better align with the PixelFlow framework, we introduce several modifications, as detailed below.

Patchify. Following the Vision Transformer (ViT) design [15, 45], the first layer of PixelFlow is a patch embedding layer, which converts the spatial representation of the input image into a 1D sequence of tokens via a linear projection. In contrast to prior latent transformers [17, 42, 45] that operate on VAE-encoded latents, PixelFlow directly tokenizes raw pixel inputs. To support efficient attention across multiple resolutions within a batch, we apply a sequence packing strategy [11], concatenating flattened token sequences of varying lengths—corresponding to different resolutions—along the sequence dimension.

RoPE. 
After patchifying, we replace the original sincos positional encoding [45] with RoPE [58] to better handle varying image resolutions. RoPE has shown strong performance in enabling length extrapolation, particularly in large language models. To adapt it for 2D image data, we apply 2D-RoPE by independently applying 1D-RoPE to the height and width dimensions, with each dimension occupying half of the hidden state. + +![](images/ce3d523162e2de6798738a7ada075092aa9b6d6abae010378ff834f3f0628d6c.jpg)
Figure 3. Visualization of intermediate result of cascaded stages. We extract the intermediate results from each of the four stages for direct visualization. We observed a clear denoising process at various resolution stages. + +Resolution Embedding. Since PixelFlow operates across multiple resolutions using a shared set of model parameters, we introduce an additional resolution embedding to distinguish between resolutions. Specifically, we use the absolute resolution of the feature map after patch embedding as a conditional signal. This signal is encoded using sinusoidal position embedding [62] and added to the timestep embedding before being passed into the model. + +Text-to-Image Generation. While class-conditional image generation typically integrates conditioning information through adaptive layer normalization (adaLN) [45], we extend PixelFlow to support text-to-image generation by introducing a cross-attention layer after each self-attention layer within every Transformer block [6, 7]. This design allows the model to effectively align visual features with the textual input at every stage of the generation process. Following recent work [8, 59], we adopt the Flan-T5-XL language model [10] to extract rich text embeddings, which serve as conditioning signals throughout the network. + +# 3.4. Training and Inference + +To facilitate efficient training, we uniformly sample training examples from all resolution stages using the interpolation scheme defined in Equation (4). 
Additionally, we employ the sequence packing technique [11], which enables joint training of scale-variant examples within a single minibatch, improving both efficiency and scalability. + +During inference, the generation process begins with pure Gaussian noise at the lowest resolution and progressively transitions to higher resolutions through multiple stages. Within each resolution stage, we apply standard flow-based sampling, using either the Euler discrete sampler [17] or the Dopri5 solver, depending on the desired trade-off between speed and accuracy. To ensure smooth and coherent transitions across scales, we adopt a renoising strategy [29, 60], which effectively mitigates the jumping point issue [4] often observed in multi-scale generation pipelines. + +# 4. Experiments + +In this section, we first detail our experimental setup in Sec. 4.1. Subsequently, we analyze key components of our approach, including model design (Sec. 4.2) and inference configurations (Sec. 4.3). Finally, we benchmark PixelFlow against state-of-the-art methods on class- (Sec. 4.4) and text-to-image (Sec. 4.5) generation tasks. + +# 4.1. Experimental Setup + +We evaluate PixelFlow for class-conditional image generation on the ImageNet-1K [12] dataset. Unless stated otherwise, we train PixelFlow at $256 \times 256$ resolution. All models are trained using the AdamW optimizer [32, 41] with a constant learning rate of $1 \times 10^{-4}$ . Performance is primarily measured by Fréchet Inception Distance (FID) using the standard evaluation toolkit1. We also report Inception Score (IS) [53], sFID [43], and Precision/Recall [33]. + +For text-conditional image generation, we progressively train PixelFlow from $256 \times 256$ up to $1024 \times 1024$ resolution. We include qualitative comparisons with current state-of-the-art generative models, along with quantitative assessments on popular benchmarks such as T2I-CompBench [27], GenEval [19], and DPG-Bench [26]. + +# 4.2. 
Model Design + +Kickoff sequence length. In principle, PixelFlow can be trained to progressively increase resolution from very low resolution (e.g., $1 \times 1$ ) up to the target resolution. However, this approach is inefficient in practice, as tokens at extremely low resolutions convey limited meaningful information. Furthermore, allocating excessive timesteps to very short sequences underutilizes the computational capacity of modern GPUs, resulting in decreased model FLOPS utilization. Therefore, we explore how varying the resolution at which image generation begins, which we call kickoff image resolution, impacts overall performance. + +For our transformer-based backbone, the number of tokens involved in attention operations is determined by the + +
kickoff seq. len.FID ↓sFID ↓IS ↑Precision ↑Recall ↑
32×323.346.1184.750.780.57
8×83.216.2378.500.780.56
2×23.496.4567.810.780.54
+ +Table 1. Effect of kickoff sequence length. All models are trained with 600k iterations on ImageNet-1K. Patch size is $2 \times 2$ and target image resolution is $64 \times 64$ . + +
patch sizeFID ↓sFID ↓IS ↑Precision ↑Recall ↑speed†
target res.64×64; kickoff seq.len.2×2; 600K iters
2×23.496.4567.810.780.541.28
4×43.415.5268.830.770.560.58
target res.256×256; kickoff seq.len.2×2; 100K iters
2×228.506.4047.370.580.5330.88
4×433.177.7142.290.570.527.31
8×847.509.6331.190.450.503.96
target res.256×256; kickoff seq.len.2×2; 1600K iters; EMA
4×42.815.48251.790.820.557.31
8×84.655.42195.500.790.543.96
+ +raw image resolution and the patch size. In this experiment, we maintain a consistent patch size of $2 \times 2$ [45], making the kickoff sequence length directly dependent on the kickoff image resolution. Specifically, we evaluate three kickoff sequence length— $2 \times 2$ , $8 \times 8$ , and $32 \times 32$ while keeping the target resolution fixed at $64 \times 64$ . Notably, the $32 \times 32$ setting represents a vanilla pixel-based approach without cascading across resolutions. + +As shown in Table 1, among these configurations, the $8 \times 8$ kickoff sequence length achieves comparable or even slightly improved FID compared to the $32 \times 32$ baseline. This suggests that initiating generation from an appropriately smaller resolution and progressively scaling up can maintain generation quality while improving computational efficiency by allocating fewer computations to the largest resolution stage. Conversely, reducing the kickoff sequence length further to $2 \times 2$ results in a performance degradation, likely because tokens at extremely low resolutions provide limited useful information and insufficient guidance for subsequent generation steps. Taking into account both generation quality and computational efficiency, we therefore adopt $8 \times 8$ as our default kickoff sequence length. + +Table 2. Effect of patch size. All models have a kickoff sequence length of $2 \times 2$ . Upper: target resolution of $64 \times 64$ ; Middle: target resolution of $256 \times 256$ resolution, training with 100K iterations due to computational constraints of patch size $2 \times 2$ ; Bottom: Extended training to 1600K iterations at $256 \times 256$ resolution.†Speed measured as number of seconds per sample on a single GPU with a batchsize of 50. + +
stepFID ↓sFID ↓IS ↑Precision ↑Recall ↑
103.395.98255.270.800.54
202.535.53272.130.820.56
302.515.82274.920.820.56
402.556.58272.680.810.56
+ +(a) Effect of number of steps per stage. CFG is a global constant value 1.50, sample function is Euler. + +
solverFID ↓sFID ↓IS ↑Precision ↑Recall ↑
Euler2.515.82274.920.820.56
Dopri52.435.38282.200.830.56
+ +(b) Effect of sample function. CFG is a global constant value 1.50, the number of steps per stage is 30 in Euler, the absolute tolerance is 1e-6 in Dopri5. + +
cfg schedulecfg max valueFID ↓IS ↑
global constant1.502.43282.2
stage-wise constant2.401.98282.1
+ +(c) Effect of classifier-free guidance (CFG) setting. Sample function is Dopri5 with absolute tolerance 1e-6. + +Table 3. Inference Setting. The best performance is obtained by CFG step-wise constant with maximum value 2.40 and Dopri5 sample function. + +Patch size. Next, we investigate the impact of patch size on model performance while maintaining a kickoff sequence length of $2 \times 2$ . Initially, we experiment with a target resolution of $64 \times 64$ and compare two patch sizes— $2 \times 2$ and $4 \times 4$ —with results presented in the upper section of Table 2. We observe that PixelFlow achieves very similar performance across these two settings, with the $4 \times 4$ patch slightly outperforming the $2 \times 2$ patch on four out of five evaluation metrics. Furthermore, using a patch size of $4 \times 4$ eliminates the highest-resolution stage required by the $2 \times 2$ patch size configuration, thus improving efficiency. + +When scaling to a larger target resolution (i.e., $256 \times 256$ ), employing a patch size of $2 \times 2$ becomes computationally infeasible due to substantial resource demands, limiting our experiments to only 100K training iterations (middle section of Table 2). This constraint necessitates adopting larger patch sizes. Although increasing the patch size further to $8 \times 8$ significantly enhances computational efficiency, it leads to a noticeable drop in performance quality. Moreover, this performance gap persists even after extended training (1600K iterations), as shown in the bottom section of Table 2. Considering both generation quality and computational cost, we therefore select a patch size of $4 \times 4$ as our default setting. + +# 4.3. Inference Schedule + +In Table 3, we provide a detailed analysis of the inference configuration space, including the number of inference + +![](images/0e25921f60260761bdf0b391aa1492db1141e06144be12d09082ff2e7b581e9c.jpg) +Figure 4. 
Qualitative results of class-conditional image generation of PixelFlow. All images are $256 \times 256$ resolution. + +steps at each resolution stage, the choice of ODE solver, and the scheduling of classifier-free guidance (CFG). + +Number of sample steps. In Table 3a, we evaluate the impact of the number of inference steps per resolution stage on generation quality. As the number of steps increases, we observe consistent improvements in FID, sFID, and IS, with the best overall performance achieved at 30 steps. Beyond this point, gains saturate and even slightly decline, indicating diminishing returns. + +A notable advantage of PixelFlow is its flexibility in assigning different numbers of sampling steps to each resolution stage during inference. This adaptive configuration allows fine-grained control over the sampling process, enabling performance-efficiency trade-offs. Moving beyond a uniform setting and exploring more granular stage-specific step allocations holds the potential for further performance enhancements. + +ODE Solver. We further investigate the effect of the ODE solver type on generation quality. As shown in Table 3b, we compare the first-order Euler solver with the adaptive higher-order Dormand-Prince (Dopri5) solver [14]. The results indicate that Dopri5 consistently outperforms Euler + +across most evaluation metrics, achieving lower FID and sFID scores, a higher Inception Score, and slightly better precision, while maintaining similar recall. This demonstrates that more accurate and adaptive solvers, such as Dopri5, can better capture the generative dynamics, leading to higher-quality samples—though often with increased computational cost. + +CFG Schedule. Inspired by the recent process [5, 34, 63], we propose a stage-wise CFG schedule, where different stages apply different CFG values, and from the early stage to the later stage, the value increases from 1 to $\mathrm{CFG}_{\mathrm{max}}$ . 
With 4 stages, we find that fractions 0, 1/6, 2/3, and 1 of $(\mathrm{CFG}_{\mathrm{max}} - 1)$ give the best FID performance. The comparison between global constant CFG and stage-wise CFG is shown in Table 3c, in which we search the best CFG value for each method. Our proposed stage-wise CFG boosts the FID performance from 2.43 to 1.98. + +# 4.4. Comparison on ImageNet Benchmark + +In Table 4, we compare PixelFlow with both latent-based and pixel-based image generation models on the ImageNet $256 \times 256$ benchmark. PixelFlow achieves an FID of 1.98, + +
ModelFID ↓ sFID ↓ IS ↑Precision ↑Recall ↑
Latent Space
LDM-4-G [50]3.60-247.70.870.48
DiT-XL/2 [45]2.274.60278.20.830.57
SiT-XL/2 [42]2.064.49277.50.830.59
Pixel Space
ADM-G [13]4.595.25186.70.820.52
ADM-U [13]3.946.14215.80.830.53
CDM [22]4.88-158.7--
RIN [9, 28]3.42-182.0--
SD, U-ViT-L [24]2.77-211.8--
MDM [20]3.51----
StyleGAN-XL [54]2.304.02265.10.780.53
VDM++ [31]2.12-267.7--
PaGoDA [30]1.56-259.6-0.59
SiD2 [25]1.38----
JetFormer [61]6.64--0.690.56
FractalMAR-H [37]6.15-348.90.810.46
PixelFlow (ours)1.985.83282.10.810.60
+ +representing highly competitive performance relative to state-of-the-art latent-space methods. For instance, it outperforms LDM [50] (FID 3.60), DiT [45] (FID 2.27), and SiT [42] (FID 2.06), while achieving comparable IS and recall scores. These results highlight the effectiveness of our design, suggesting that PixelFlow can serve as a strong prototype for high-quality visual generation systems. + +Compared with recent pixel-based models, PixelFlow achieves superior sample quality. It notably outperforms FractalMAR-H [37], and also delivers competitive or better results than strong baselines like ADM-U [13], SiD2 [25], and VDM++ [31]. + +We visualize class-conditional image generation of PixelFlow at $256 \times 256$ resolution in Figure 4. We can observe our model is able to generate images of high visual quality across a wide range of classes. + +# 4.5. Text-to-Image Generation + +Settings. We adopt a two-stage training strategy for text-to-image generation of PixelFlow. First, the model is initialized with an ImageNet-pretrained checkpoint at a resolution of $256 \times 256$ and trained on a subset of the LAION dataset [55] at the same resolution. In the second stage, we fine-tune the model on a curated set of high-aesthetic-quality images at a higher resolution of $512 \times 512$ . All reported results for PixelFlow are based on this final $512 \times 512$ resolution model. + +Table 4. Comparisons on class-conditional image generation on ImageNet $256 \times 256$ . PixelFlow achieves competitive performance compared with latent space based models. + +
MethodGenEval OverallT2I-CompBenchDPG Bench
ColorShapeTexture
SDv1.5 [50]0.430.37300.36460.421963.18
DALL-E 2 [49]0.520.57500.54640.6374-
SDv2.1 [50]0.500.56940.44950.4982-
SDXL [47]0.550.63690.54080.563774.65
PixArt-α [6]0.480.68860.55820.704471.11
DALL-E 3 [3]0.67†0.8110†0.6750†0.8070†83.50†
GenTron [7]-0.76740.57000.7150-
SD3 [17]0.74----
Transfusion [70]0.63----
LlamaGen [59]0.32----
Emu 3 [64]0.66†0.7913†0.5846†0.7422†80.60
PixelFlow (ours)0.600.75780.45290.600677.93
0.64†0.7689†0.5059†0.6273†
+ +Table 5. Comparison with state-of-the-art models on text-to-image generation benchmarks. We evaluate on GenEval [19], T2I-CompBench [27] and DPG-Bench [26]. We use $\dagger$ to indicate the result with prompt rewriting. + +To comprehensively evaluate the performance of PixelFlow-T2I in text-to-image generation, we employ three widely recognized benchmarks, each targeting a different facet of compositional understanding: T2I-CompBench [27] assesses alignment between generated images and complex semantic relationships in text. We evaluate three tasks—color, shape, and texture binding—by generating five images per prompt across 300 prompts per sub-task. Alignment is measured using BLIP-VQA[36]; GenEval [19] evaluates compositional aspects such as coherence and spatial arrangement. We generate over 2,000 images from 553 prompts and report the average performance across tasks; DPG-Bench [26] focuses on complex textual descriptions, with 4,000 images generated from 1,065 prompts and results averaged across tasks. + +Quantitative results. As shown in Table 5, PixelFlow achieves competitive performance across all benchmarks, demonstrating strong compositional understanding in freeform text-to-image generation. It performs particularly well on T2I-CompBench, with high scores in color and texture binding, and solid results on GenEval (0.64) and DPG-Bench (77.93), surpassing many established models. These results underscore PixelFlow as a promising direction for pixel-space image generation conditioned on natural language—showcasing its potential for open-ended, text-driven image synthesis. + +Visualization. 
We visualize the intermediate results during the sampling process in Figure 3, specifically show + +![](images/dfa76b1e78167a2685a4cccf48e1fa4f40bc3d0764b6f9c45a96acf621b4f749.jpg) +A native Warrior shaman Bengal Cat with a black and white leopard pattern, blue eyes, short fur, and portrait pose, colorful feathers and colorful ornaments, a regal oil-style portrait of the queen of native Kitty shaman white Cat with wings and headdress. Nordic is kind and motherly, it has black eye makeup and her hair is in messy. + +![](images/829436484ff51a169826baf14a76a83de72c82cd2d33996b8e380de88ae808a6.jpg) +1940s vintage colored photo of a well-groomed man, crew cut hair, front view, kodak portrait film + +![](images/689652bc6b53f934f9785c5165066f8e39102900d667548baef4e2410fb2b10a.jpg) +A cute 3 year old Chinese girl with a big head and a small body, hair is fluffy and messy tied in a pill head, big eyes, one eye blinking, doe mouth, playful and cute. + +![](images/f0a1ba1c09e004b15deafc39cd17fed3708d0f3127aa7ce8ffe40ba17efa03a5.jpg) +An extremely happy American Cocker Spaniel is smiling and looking up at the camera with his head tilted to one side. + +![](images/9ec021b2de0c41610614f63b527621de689b8c074053a770f502bbda417ecf35.jpg) +Full body portrait of deer by side, visible realistic, with style as a painting in the style by Caravaggio + +![](images/92fed5a1cf8a7216a94657393f910ae3da6beabb617da9b372b21359660d57ab.jpg) +Greeting card, party, hyped animal, open mouth, surprised excitement + +![](images/e27a6f4ebd515a8c9e06c383f05bb0450c9b8cc87658670a50d089cdf9c075e3.jpg) +Super cute clay world, isometric view of Eiffel Tower in Paris, cute clay stop motion animation, people + +![](images/2598aaac23e271a9919f3d68b510ad6796050886ff5e22bb5b556ae898cc8c18.jpg) +Close-up of an aged man with weathered features and sharp blue eyes peering wisely from beneath a tweed flat cap. 
+ +![](images/ae7891b6b48cc6233d1d37686117c09cf7b4ad7ebb7634ee6b21bd31c81de56d.jpg) +A white bearded man's face emerges from a cloud of white butterflies, background is white +Figure 5. Qualitative results of text-conditional generation of PixelFlow. All images are $512 \times 512$ resolution. Key components of the prompt are highlighted in RED. + +![](images/f1a6f6462f120aca1848d9adea07e7e02916b71d19d0e7609f25d0ed0debcfc0.jpg) + +![](images/4bf4b044f87e646bd766a7e0041ee5816da7c9dfabdbc88c300b35221f52b162.jpg) +A digital art piece featuring a splitface portrait of a woman. The left side of face is in a calm, while the right side shows a more intense and red color + +![](images/7e3f68cc81e7cd28b1a3cd02cda899bdfad7c951332adfb75b8904502f1c90ee.jpg) +A baby cat stands on two legs. facing forward, wearing an Indian classical gloves and shoes. +Johannes Vermeer, panda wearing pearl earrings, blue headbands, artwork Girl with a Pearl Earring oil painting, + +ing the final step of each resolution stage. As resolution increases, a clear denoising trend emerges—images become progressively cleaner and less noisy at each stage. Additional generated samples along with their input text prompts are shown in Figure 5 (512×512) and Figure 6 (1024×1024). PixelFlow demonstrates high visual fidelity and strong text-image alignment, effectively capturing key visual elements and their relationships from complex prompts. Notably, it generates fine-grained details—such as animal fur, human hair, and hat textures—highlighting its strong attention to detail in pixel space. + +# 5. Conclusion + +We introduce PixelFlow, a novel image generation model that re-think the predominance of latent space based models by directly operating on raw pixel space. By directly + +transforming between different resolution stages, our model exhibits a compelling advantage in simplicity and end-to-end trainability. 
On both class-conditional image generation and text-to-image generation benchmarks, PixelFlow has been proven to demonstrate competitive image generation capabilities compared to popular latent space-based methods. We hope that this new perspective will inspire future research in visual generation models. + +Limitations Despite its advantages, PixelFlow still faces certain limitations. Although the model avoids full-resolution computation across all stages, the final stage requires full-resolution attention, which accounts for roughly $80\%$ of the total inference time. Moreover, we observe that training convergence slows as the sequence length increases. Addressing these challenges presents opportunities for future improvements in efficiency and scalability. + +![](images/382f8d45ddca614bb480fe9f7a8e10e2908775db30a386026a4ade4a60172f9f.jpg) +Raspberry in the form of women walk along the path of a fairy tale forest. She carries a jug of water with her. Her head is made of one big raspberry on which she has big and beautiful eyes, as well as nose and mouth. + +![](images/33486aad29bbb90aa86465c16413ccddf01d8d1a9adeff17f3b6ec707ed7a7a1.jpg) +An embroidered sweater with an anatomical illustration of the human torso and chest, the skin is open to reveal the internal anatomy. + +![](images/3d24f25e0022f0473ae02438c61b68f810c8ea75a900967a8e7b99f7b4169784.jpg) + +Figure 6. Qualitative samples of PixelFlow. We present the generated images of $1024 \times 1024$ resolution. Key words are highlighted in RED. +![](images/aac1594d1fe3227964b93b143aa9bd09406790d42aee55a24bcc6a2b4154c6ce.jpg) +Photorealistic, 4k, a micro baby African Buffalo perched on a coffee cup + +![](images/3692d5cc5a780a0a5c914a8de19ce40c80a3d05257bac2c213777c182b8aeeae.jpg) +Great Dane Dog sitting on a toilet bowl in wide bathroom, reading a large double page spread newspaper, sit like human. The background is in a white room. 
+ +![](images/d180a5889fe2515f2799a5337322a963bbac86597d9a728f402fb0406306f195.jpg) +A picture of Joe rogan's head on a cat's body, sitting behind a podcasting microphone. + +![](images/8e2312a41faf07f10fae91853ba5b4c3c92b1b41f658daedd6637a40fbad39fd.jpg) +Full body shot of balenciaga fashion model and parrot hybrid with a human body and the head of the parrot. He is walking through a podium like a model. + +![](images/17b84eb273ffb350d431ec171e95947703648198191d356a26eeca493a7eadd9.jpg) +Prototype flying fox made from blown glass, Lino Tagliapietra style Muranean glassmaking, intricate details. +3D illustration of the chip with text "AI" floating above it, with a blue color scheme. + +![](images/af011922a940b4656f8bfebb22da807911ec5a80f76519c367ace4b119726fc9.jpg) +Sketch sheet of anatomical studies by Leonardo da Vinci Iron man and weapons, show detailed studies of technology and body, use little soft details in red and gold for the armor, mathematic. + +![](images/c32148ab97591973dd086d1447f89537f63a1843935aa5912f270e9ed7ced30f.jpg) +The world's smallest laughing baby Piggy, perched on someone's finger. + +![](images/d595fc36281d0c0c4ea8cd4eb1657f6509f2d51fe7437ccbfade8cdf93ea2175.jpg) +Telephoto lens shooting, panoramic view, a white sheep struggling desperately under the sea, with bubbles constantly popping out of its mouth, realistic and lifelike. + +# References + +[1] Michael Samuel Albergo and Eric Vanden-Eijnden. Building normalizing flows with stochastic interpolants. In The Eleventh International Conference on Learning Representations, 2023. 2 +[2] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Qinsheng Zhang, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, et al. ediff-i: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 2 +[3] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. 
Improving image generation with better captions. Computer Science. https://cdn.opennai.com/papers/dall-e-3.pdf, 2(3):8, 2023. 7 +[4] Andrew Campbell, William Harvey, Christian Dietrich Weilbach, Valentin De Bortoli, Tom Rainforth, and Arnaud Doucet. Trans-dimensional generative modeling via jump diffusion models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 4 +[5] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11315-11325, 2022. 6 +[6] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 4, 7 +[7] Shoufa Chen, Mengmeng Xu, Jiawei Ren, Yuren Cong, Sen He, Yanping Xie, Animesh Sinha, Ping Luo, Tao Xiang, and Juan-Manuel Perez-Rua. Gentron: Diffusion transformers for image and video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6441-6451, 2024. 1, 4, 7 +[8] Shoufa Chen, Chongjian Ge, Yuqi Zhang, Yida Zhang, Fengda Zhu, Hao Yang, Hongxiang Hao, Hui Wu, Zhichao Lai, Yifei Hu, et al. Goku: Flow based video generative foundation models. arXiv preprint arXiv:2502.04896, 2025.1, 4 +[9] Ting Chen. On the importance of noise scheduling for diffusion models. arXiv preprint arXiv:2301.10972, 2023. 7 +[10] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instructionfinetuned language models. Journal of Machine Learning Research, 25(70):1-53, 2024. 
4 +[11] Mostafa Dehghani, Basil Mustafa, Josip Djolonga, Jonathan Heek, Matthias Minderer, Mathilde Caron, Andreas Steiner, Joan Puigcerver, Robert Geirhos, Ibrahim M Alabdul-mohsin, et al. Patch n'pack: Navit, a vision transformer for any aspect ratio and resolution. Advances in Neural Information Processing Systems, 36, 2024. 3, 4 +[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 4 + +[13] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 7 +[14] John R Dormand and Peter J Prince. A family of embedded runge-kutta formulae. Journal of computational and applied mathematics, 6(1):19-26, 1980. 6 +[15] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 3 +[16] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 2 +[17] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 1, 2, 3, 4, 7 +[18] Zach Evans, CJ Carr, Josiah Taylor, Scott H Hawley, and Jordi Pons. Fast timing-conditioned latent audio diffusion. In *Forty-first International Conference on Machine Learning*, 2024. 1 +[19] Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. Geneval: An object-focused framework for evaluating text-to-image alignment. Advances in Neural Information Processing Systems, 36, 2024. 
2, 4, 7 +[20] Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Joshua M Susskind, and Navdeep Jaitly. Matryoshka diffusion models. In The Twelfth International Conference on Learning Representations, 2023. 1, 2, 7 +[21] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 2 +[22] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. Journal of Machine Learning Research, 23(47):1-33, 2022. 1, 2, 7 +[23] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 1 +[24] Emiel Hoogeboom, Jonathan Heek, and Tim Salimans. simple diffusion: End-to-end diffusion for high resolution images. In International Conference on Machine Learning, pages 13213-13232. PMLR, 2023. 2, 7 +[25] Emiel Hoogeboom, Thomas Mensink, Jonathan Heek, Kay Lamerigts, Ruiqi Gao, and Tim Salimans. Simpler diffusion (sid2): 1.5 fid on imagenet512 with pixel-space diffusion. arXiv preprint arXiv:2410.19324, 2024. 2, 7 +[26] Xiwei Hu, Rui Wang, Yixiao Fang, Bin Fu, Pei Cheng, and Gang Yu. Ella: Equip diffusion models with llm for enhanced semantic alignment. arXiv preprint arXiv:2403.05135, 2024. 2, 4, 7 +[27] Kaiyi Huang, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2i-compbench: A comprehensive benchmark for open-world compositional text-to-image generation. Advances in Neural Information Processing Systems, 36:78723-78747, 2023. 4, 7 + +[28] Allan Jabri, David Fleet, and Ting Chen. Scalable adaptive computation for iterative generation. arXiv preprint arXiv:2212.11972, 2022. 7 +[29] Yang Jin, Zhicheng Sun, Ningyuan Li, Kun Xu, Hao Jiang, Nan Zhuang, Quzhe Huang, Yang Song, Yadong Mu, and Zhouchen Lin. Pyramidal flow matching for efficient video generative modeling. 
arXiv preprint arXiv:2410.05954, 2024.3,4 +[30] Dongjun Kim, Chieh-Hsin Lai, Wei-Hsiang Liao, Yuhta Takida, Naoki Murata, Toshimitsu Uesaka, Yuki Mitsufuji, and Stefano Ermon. Pagoda: Progressive growing of a one-step generator from a low-resolution diffusion teacher. arXiv preprint arXiv:2405.14822, 2024. 2, 7 +[31] Diederik Kingma and Ruiqi Gao. Understanding diffusion objectives as the elbo with simple data augmentation. Advances in Neural Information Processing Systems, 36, 2024. 7 +[32] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2015. 4 +[33] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 4 +[34] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 6 +[35] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.1, 2 +[36] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR, 2022. 7 +[37] Tianhong Li, Qinyi Sun, Lijie Fan, and Kaiming He. Fractal generative models. arXiv preprint arXiv:2502.17437, 2025.2, 7 +[38] Yaron Lipman, Ricky T. Q. Chen, Heli Ben-Hamu, Maximilian Nickel, and Matthew Le. Flow matching for generative modeling. In The Eleventh International Conference on Learning Representations, 2023. 2 +[39] Haohe Liu, Zehua Chen, Yi Yuan, Xinhao Mei, Xubo Liu, Danilo Mandic, Wenwu Wang, and Mark D Plumbley. Audioldm: Text-to-audio generation with latent diffusion models. arXiv preprint arXiv:2301.12503, 2023. 
1 +[40] Xingchao Liu, Chengyue Gong, and qiang liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. In The Eleventh International Conference on Learning Representations, 2023. 2 +[41] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 4 +[42] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable + +interpolant transformers. arXiv preprint arXiv:2401.08740, 2024. 2, 3, 7 +[43] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter W Battaglia. Generating images with sparse representations. arXiv preprint arXiv:2103.03841, 2021. 4 +[44] NVIDIA. Edify image: High-quality image generation with pixel space laplacian diffusion model. arXiv preprint arXiv:2411.07126, 2024. 1 +[45] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 1, 2, 3, 4, 5, 7 +[46] Pablo Pernias, Dominic Rampas, Mats L Richter, Christopher J Pal, and Marc Aubreville. Würstchen: An efficient architecture for large-scale text-to-image diffusion models. arXiv preprint arXiv:2306.00637, 2023. 1 +[47] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 1, 2, 7 +[48] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International conference on machine learning, pages 8821-8831. Pmlr, 2021. 2 +[49] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. 
arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 7 +[50] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 2, 7 +[51] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022. 1 +[52] Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image superresolution via iterative refinement. IEEE transactions on pattern analysis and machine intelligence, 45(4):4713-4726, 2022. 1, 2 +[53] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 4 +[54] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 conference proceedings, pages 1-10, 2022. 7 +[55] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems, 35:25278-25294, 2022. 7 + +[56] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International conference on machine learning, pages 2256-2265. PMLR, 2015. 2 +[57] Gabriela Ben Melech Stan, Diana Wofk, Scottie Fox, Alex Redden, Will Saxton, Jean Yu, Estelle Aflalo, Shao-Yen Tseng, Fabio Nonato, Matthias Muller, et al. 
Ldm3d: Latent diffusion model for 3d. arXiv preprint arXiv:2305.10853, 2023. 1 +[58] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 3 +[59] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 2, 4, 7 +[60] Jiayan Teng, Wendi Zheng, Ming Ding, Wenyi Hong, Jianqiao Wangni, Zhuoyi Yang, and Jie Tang. Relay diffusion: Unifying diffusion process across resolutions for image synthesis. arXiv preprint arXiv:2309.03350, 2023. 4 +[61] Michael Tschannen, André Susano Pinto, and Alexander Kolesnikov. Jetformer: An autoregressive generative model of raw images and text. arXiv preprint arXiv:2411.19722, 2024. 7 +[62] Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Neural Information Processing Systems, 2017. 3, 4 +[63] Xi Wang, Nicolas Dufour, Nefeli Andreou, Marie-Paule Cani, Victoria Fernández Abrevaya, David Picard, and Vicky Kalogeiton. Analysis of classifier-free guidance weight schedulers. arXiv preprint arXiv:2404.13040, 2024. 6 +[64] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 7 +[65] Hanshu Yan, Xingchao Liu, Jiachun Pan, Jun Hao Liew, qiang liu, and Jiashi Feng. PeRFlow: Piecewise rectified flow as universal plug-and-play accelerator. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 3 +[66] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. 
Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024. 1, 2 +[67] Xiaohui Zeng, Arash Vahdat, Francis Williams, Zan Gojcic, Or Litany, Sanja Fidler, and Karsten Kreis. LION: Latent point diffusion models for 3d shape generation. In Advances in Neural Information Processing Systems, 2022. 1 +[68] Shuangfei Zhai, Ruixiang Zhang, Preetum Nakkiran, David Berthelot, Jiatao Gu, Huangjie Zheng, Tianrong Chen, Miguel Angel Bautista, Navdeep Jaitly, and Josh Susskind. Normalizing flows are capable generative models. arXiv preprint arXiv:2412.06329, 2024. 2 + +[69] Shilong Zhang, Wenbo Li, Shoufa Chen, Chongjian Ge, Peize Sun, Yida Zhang, Yi Jiang, Zehuan Yuan, Binyue Peng, and Ping Luo. Flashvideo: Flowing fidelity to detail for efficient high-resolution video generation. arXiv preprint arXiv:2502.05179, 2025. 1 +[70] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024. 
7 \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07963/images/0e25921f60260761bdf0b391aa1492db1141e06144be12d09082ff2e7b581e9c.jpg b/data/2025/2504_07xxx/2504.07963/images/0e25921f60260761bdf0b391aa1492db1141e06144be12d09082ff2e7b581e9c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7dd90a7b18e3062b54927c8ab4c4ca0669b3a64a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/0e25921f60260761bdf0b391aa1492db1141e06144be12d09082ff2e7b581e9c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:539166251a2d6f6d5e772522665bbb01b9fb85434153841bbbbabfb02bf33420 +size 197386 diff --git a/data/2025/2504_07xxx/2504.07963/images/0e303bdb1b93ae43dcdd2c8a5980195c88032b7943e9c9c23810e68612c366c0.jpg b/data/2025/2504_07xxx/2504.07963/images/0e303bdb1b93ae43dcdd2c8a5980195c88032b7943e9c9c23810e68612c366c0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7f39b8585655546e9da41309e897b9b55a7055f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/0e303bdb1b93ae43dcdd2c8a5980195c88032b7943e9c9c23810e68612c366c0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54af35f5ca9c80d40068c4d438731ae29b3a5ff977848c484a6410e9da355a4a +size 76484 diff --git a/data/2025/2504_07xxx/2504.07963/images/17b84eb273ffb350d431ec171e95947703648198191d356a26eeca493a7eadd9.jpg b/data/2025/2504_07xxx/2504.07963/images/17b84eb273ffb350d431ec171e95947703648198191d356a26eeca493a7eadd9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37478de47c5d8831dda356c234eb920f0131d7cf --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/17b84eb273ffb350d431ec171e95947703648198191d356a26eeca493a7eadd9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a28fb0f201d6223b3b66e09d44f0e8fda8ac1284f93d3df6c00655abb0b3e9a3 +size 9517 diff --git 
a/data/2025/2504_07xxx/2504.07963/images/1e011bdb3b1072d22c1a3b15e9bc1d155d38ad1bc57863c4acfb76b601896f88.jpg b/data/2025/2504_07xxx/2504.07963/images/1e011bdb3b1072d22c1a3b15e9bc1d155d38ad1bc57863c4acfb76b601896f88.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a976ed3b4915ba56820909d414e682e96fadeac --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/1e011bdb3b1072d22c1a3b15e9bc1d155d38ad1bc57863c4acfb76b601896f88.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d0f199fe995620ff4fbc9d32b586e70f01952831ef129e7e5eefdcd6b36db66 +size 37255 diff --git a/data/2025/2504_07xxx/2504.07963/images/2598aaac23e271a9919f3d68b510ad6796050886ff5e22bb5b556ae898cc8c18.jpg b/data/2025/2504_07xxx/2504.07963/images/2598aaac23e271a9919f3d68b510ad6796050886ff5e22bb5b556ae898cc8c18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b4ee6e43998b7ce2f8374a1f33a1982162b3d50 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/2598aaac23e271a9919f3d68b510ad6796050886ff5e22bb5b556ae898cc8c18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b447d23353418f0cc789bb99b850f197b5bdbccd57d9135d8d767c2a75df00c +size 14824 diff --git a/data/2025/2504_07xxx/2504.07963/images/2aa2c91c088d596f217bb74d209a97ca363f7b3f546d28a8f2151e1737d538ec.jpg b/data/2025/2504_07xxx/2504.07963/images/2aa2c91c088d596f217bb74d209a97ca363f7b3f546d28a8f2151e1737d538ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9232cc79ee964bf7ac18a766e5e4cc6e52ac7b31 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/2aa2c91c088d596f217bb74d209a97ca363f7b3f546d28a8f2151e1737d538ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72172c9601d9b79386753f3c471b1c5a3243ad7c95fb0279f6699c6cd7cf6823 +size 3715 diff --git a/data/2025/2504_07xxx/2504.07963/images/2ef03556144a3f07df0d650822db97bd860a68224ee0967d14ce8cfd30a207b3.jpg 
b/data/2025/2504_07xxx/2504.07963/images/2ef03556144a3f07df0d650822db97bd860a68224ee0967d14ce8cfd30a207b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0ece6923aff0020acab724458d292d354454596 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/2ef03556144a3f07df0d650822db97bd860a68224ee0967d14ce8cfd30a207b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f560cb51c799e5bc5ca3e45f1aed6986919bd6f28aa086b1602d9af4a8e71c1 +size 3272 diff --git a/data/2025/2504_07xxx/2504.07963/images/33486aad29bbb90aa86465c16413ccddf01d8d1a9adeff17f3b6ec707ed7a7a1.jpg b/data/2025/2504_07xxx/2504.07963/images/33486aad29bbb90aa86465c16413ccddf01d8d1a9adeff17f3b6ec707ed7a7a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e738cf7001e433ebbd0e17176e0f1455b4461b21 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/33486aad29bbb90aa86465c16413ccddf01d8d1a9adeff17f3b6ec707ed7a7a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4295e85792ad6e21a70dc724919e35e1fcfe04ac3790e9cfb6792c8fabcbb87 +size 16617 diff --git a/data/2025/2504_07xxx/2504.07963/images/3692d5cc5a780a0a5c914a8de19ce40c80a3d05257bac2c213777c182b8aeeae.jpg b/data/2025/2504_07xxx/2504.07963/images/3692d5cc5a780a0a5c914a8de19ce40c80a3d05257bac2c213777c182b8aeeae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..574fa3703aa278828ec0392b5721b7ec86b3e53d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/3692d5cc5a780a0a5c914a8de19ce40c80a3d05257bac2c213777c182b8aeeae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82152c02e273f0eb4ce9caaeb9993f63e782dc9ddb5124abcab3ad0f1aa6c247 +size 9711 diff --git a/data/2025/2504_07xxx/2504.07963/images/382f8d45ddca614bb480fe9f7a8e10e2908775db30a386026a4ade4a60172f9f.jpg b/data/2025/2504_07xxx/2504.07963/images/382f8d45ddca614bb480fe9f7a8e10e2908775db30a386026a4ade4a60172f9f.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..21771808f14b07b26923d3e3505dc5f108f376cd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/382f8d45ddca614bb480fe9f7a8e10e2908775db30a386026a4ade4a60172f9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f6d9d3d86de026968a428714d2ad9a06ef2061ae48e1b3412f8940e02380a6d +size 67025 diff --git a/data/2025/2504_07xxx/2504.07963/images/3d24f25e0022f0473ae02438c61b68f810c8ea75a900967a8e7b99f7b4169784.jpg b/data/2025/2504_07xxx/2504.07963/images/3d24f25e0022f0473ae02438c61b68f810c8ea75a900967a8e7b99f7b4169784.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fd882552c26da62995ba3f5b6b258649564e4d9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/3d24f25e0022f0473ae02438c61b68f810c8ea75a900967a8e7b99f7b4169784.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b3d040c43896493aa4ce5b571a18c83cf74db962b1348a6f50cb298766dd7f4 +size 15622 diff --git a/data/2025/2504_07xxx/2504.07963/images/49e59133f35898f9f0b75a00267c4bcc31480d8106373d9eec10a3df2fcebdbb.jpg b/data/2025/2504_07xxx/2504.07963/images/49e59133f35898f9f0b75a00267c4bcc31480d8106373d9eec10a3df2fcebdbb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccbb4146e9ebb357fe6b23807efce91a99d929f4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/49e59133f35898f9f0b75a00267c4bcc31480d8106373d9eec10a3df2fcebdbb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d65e9b5cb1a2f575a7f4d2e393c8e2951ea95aa5a0209d043d8a0ac4626129ef +size 5245 diff --git a/data/2025/2504_07xxx/2504.07963/images/4bf4b044f87e646bd766a7e0041ee5816da7c9dfabdbc88c300b35221f52b162.jpg b/data/2025/2504_07xxx/2504.07963/images/4bf4b044f87e646bd766a7e0041ee5816da7c9dfabdbc88c300b35221f52b162.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f4045e8548cdd857ed482de83d387bdd6000f6c --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07963/images/4bf4b044f87e646bd766a7e0041ee5816da7c9dfabdbc88c300b35221f52b162.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:698d3f4b09dd09e689c83e9177da405361bb0e299d305f4f845068891c0f2cfb +size 13068 diff --git a/data/2025/2504_07xxx/2504.07963/images/689652bc6b53f934f9785c5165066f8e39102900d667548baef4e2410fb2b10a.jpg b/data/2025/2504_07xxx/2504.07963/images/689652bc6b53f934f9785c5165066f8e39102900d667548baef4e2410fb2b10a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6557222f3afea557a3f5b23888dc9ffed6999380 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/689652bc6b53f934f9785c5165066f8e39102900d667548baef4e2410fb2b10a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c474b59c72532c223956d689d2e8565c35d42d4ca56e74ad9fab84a6a22571d +size 8013 diff --git a/data/2025/2504_07xxx/2504.07963/images/6ba2736e7f8ebff671362f1038d26b7f985246d726e9b861a937b2791aafbb24.jpg b/data/2025/2504_07xxx/2504.07963/images/6ba2736e7f8ebff671362f1038d26b7f985246d726e9b861a937b2791aafbb24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..473331db8e64435c679d4260488c3d1f6a8d7112 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/6ba2736e7f8ebff671362f1038d26b7f985246d726e9b861a937b2791aafbb24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:591381b0d9b523b2dd363055182261d165b4909322d00c04034449291ba75b9e +size 32189 diff --git a/data/2025/2504_07xxx/2504.07963/images/6cbd33b515a21068b5fcb4146239d04f29be501e2f8283f26fcd92f989d561aa.jpg b/data/2025/2504_07xxx/2504.07963/images/6cbd33b515a21068b5fcb4146239d04f29be501e2f8283f26fcd92f989d561aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ddbaeca9490b0180a0586dcf527ff08c5af05fb5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/6cbd33b515a21068b5fcb4146239d04f29be501e2f8283f26fcd92f989d561aa.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0721157035897e571d89af62011e01f3a947b582d5ff99ed1c30a940d867c786 +size 15479 diff --git a/data/2025/2504_07xxx/2504.07963/images/7e3f68cc81e7cd28b1a3cd02cda899bdfad7c951332adfb75b8904502f1c90ee.jpg b/data/2025/2504_07xxx/2504.07963/images/7e3f68cc81e7cd28b1a3cd02cda899bdfad7c951332adfb75b8904502f1c90ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..161ee0e4592f232769573c937a903b47c6991696 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/7e3f68cc81e7cd28b1a3cd02cda899bdfad7c951332adfb75b8904502f1c90ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b56465069ac280b7c7ebced77b3c59577a5a4a1133fd0addf308ecbeaaff958 +size 13356 diff --git a/data/2025/2504_07xxx/2504.07963/images/7e57ec6108ab75d81d9644abadf0d757a0102e7a9fb3debaeb9465e0877cf73d.jpg b/data/2025/2504_07xxx/2504.07963/images/7e57ec6108ab75d81d9644abadf0d757a0102e7a9fb3debaeb9465e0877cf73d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2d74baa97b98569486f9d6ecfdd99a4aecc9fce5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/7e57ec6108ab75d81d9644abadf0d757a0102e7a9fb3debaeb9465e0877cf73d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:404a1188d0ce2943fd7a36f33d4c8a8d7f640b4f973a52c555cf9dd6c7db2971 +size 21106 diff --git a/data/2025/2504_07xxx/2504.07963/images/829436484ff51a169826baf14a76a83de72c82cd2d33996b8e380de88ae808a6.jpg b/data/2025/2504_07xxx/2504.07963/images/829436484ff51a169826baf14a76a83de72c82cd2d33996b8e380de88ae808a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e9e7384d09d24d1730efd3da9c4e9eb779c31f2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/829436484ff51a169826baf14a76a83de72c82cd2d33996b8e380de88ae808a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1aa16fdee0f1f60972ceae1a8c9d7d06906f99609de8cf9a22999a06668e03cf +size 11253 diff --git 
a/data/2025/2504_07xxx/2504.07963/images/8beded7c1a3aa975ea063d812dd598403ae35915c9c6a44cc8f67d5cc82ffdac.jpg b/data/2025/2504_07xxx/2504.07963/images/8beded7c1a3aa975ea063d812dd598403ae35915c9c6a44cc8f67d5cc82ffdac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2aeaf6eb1604c0195f1d059f7d80d35a03f2f5e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/8beded7c1a3aa975ea063d812dd598403ae35915c9c6a44cc8f67d5cc82ffdac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93f0703fca422ad3a0fd77b83c4fb8641c287fa26f79e06fe87ec6f0ce9b1a86 +size 19085 diff --git a/data/2025/2504_07xxx/2504.07963/images/8e2312a41faf07f10fae91853ba5b4c3c92b1b41f658daedd6637a40fbad39fd.jpg b/data/2025/2504_07xxx/2504.07963/images/8e2312a41faf07f10fae91853ba5b4c3c92b1b41f658daedd6637a40fbad39fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a9fddd46bdfb853d6e934659731765478cb0fe8a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/8e2312a41faf07f10fae91853ba5b4c3c92b1b41f658daedd6637a40fbad39fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:956e67d093e1ae459a42f458fb0006fd96a9ad4a2468d4a8453725b9ce9fc597 +size 10580 diff --git a/data/2025/2504_07xxx/2504.07963/images/92fed5a1cf8a7216a94657393f910ae3da6beabb617da9b372b21359660d57ab.jpg b/data/2025/2504_07xxx/2504.07963/images/92fed5a1cf8a7216a94657393f910ae3da6beabb617da9b372b21359660d57ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7cc401673836daec584a0a495b4693141b32bb27 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/92fed5a1cf8a7216a94657393f910ae3da6beabb617da9b372b21359660d57ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f2b0d701e0eba6ceca3bf4346397ef7a7359e805ff2ab3abdf62a1c7560ea30 +size 14780 diff --git a/data/2025/2504_07xxx/2504.07963/images/930b73d7ef35d23ff32bbb26a02538951982cb4c56d34618fa2400a3735f0f6d.jpg 
b/data/2025/2504_07xxx/2504.07963/images/930b73d7ef35d23ff32bbb26a02538951982cb4c56d34618fa2400a3735f0f6d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d59c98f56813001b79955d0836bd5ee62df6c186 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/930b73d7ef35d23ff32bbb26a02538951982cb4c56d34618fa2400a3735f0f6d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dfb94bff7f499c1461a589c36381958870d420a2200e078f0be2e5b443cdada +size 4663 diff --git a/data/2025/2504_07xxx/2504.07963/images/960f1767c92ece40bd94624c230c619ad8e2686c31b28e678b596ac051a2b79b.jpg b/data/2025/2504_07xxx/2504.07963/images/960f1767c92ece40bd94624c230c619ad8e2686c31b28e678b596ac051a2b79b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..763c8245a7b58477f8d8b5e2a9b7764ee24829dd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/960f1767c92ece40bd94624c230c619ad8e2686c31b28e678b596ac051a2b79b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8453baf7ccc2edf4c30a365f76a605384d54366b5aeaa20e3e839e1ad1c3ed59 +size 73052 diff --git a/data/2025/2504_07xxx/2504.07963/images/9ec021b2de0c41610614f63b527621de689b8c074053a770f502bbda417ecf35.jpg b/data/2025/2504_07xxx/2504.07963/images/9ec021b2de0c41610614f63b527621de689b8c074053a770f502bbda417ecf35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..037fe6056a22a1762679ec42af4fd7c1324a0977 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/9ec021b2de0c41610614f63b527621de689b8c074053a770f502bbda417ecf35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5d38f4d876b671c30a2ee853f222d8ef6ca118d9d8826171c02d9219a596184 +size 11684 diff --git a/data/2025/2504_07xxx/2504.07963/images/aac1594d1fe3227964b93b143aa9bd09406790d42aee55a24bcc6a2b4154c6ce.jpg b/data/2025/2504_07xxx/2504.07963/images/aac1594d1fe3227964b93b143aa9bd09406790d42aee55a24bcc6a2b4154c6ce.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..42b20bf0b778725e22d467bc1ff27c1459fab86f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/aac1594d1fe3227964b93b143aa9bd09406790d42aee55a24bcc6a2b4154c6ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f47df72cd14483cf58a4a8e43cb2128d38b6fcfcd5cd683e5917d615ea846a0 +size 9093 diff --git a/data/2025/2504_07xxx/2504.07963/images/ae7891b6b48cc6233d1d37686117c09cf7b4ad7ebb7634ee6b21bd31c81de56d.jpg b/data/2025/2504_07xxx/2504.07963/images/ae7891b6b48cc6233d1d37686117c09cf7b4ad7ebb7634ee6b21bd31c81de56d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d6072545a08e5e3d939b183986fa37ca6f0f848b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/ae7891b6b48cc6233d1d37686117c09cf7b4ad7ebb7634ee6b21bd31c81de56d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d90fb8b3cb0ce0d9f3ff097092d557b3e9acf526974b419943c2b6cb67b50727 +size 10031 diff --git a/data/2025/2504_07xxx/2504.07963/images/af011922a940b4656f8bfebb22da807911ec5a80f76519c367ace4b119726fc9.jpg b/data/2025/2504_07xxx/2504.07963/images/af011922a940b4656f8bfebb22da807911ec5a80f76519c367ace4b119726fc9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad3f0c0192cf0608f76c2ed2e88be41a59c1d3e7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/af011922a940b4656f8bfebb22da807911ec5a80f76519c367ace4b119726fc9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f618a9c744f4ccd84d0e1b14ab3bc4c5a7fca31a8c01c543dd6ece2233b339e8 +size 18535 diff --git a/data/2025/2504_07xxx/2504.07963/images/b214496f8b07f0bbdf963f7d6f115fc68945c207311521cdcfe9bbec49e691f4.jpg b/data/2025/2504_07xxx/2504.07963/images/b214496f8b07f0bbdf963f7d6f115fc68945c207311521cdcfe9bbec49e691f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c684b4aa3434a044508edfc233c4cc6ccf5c98d6 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07963/images/b214496f8b07f0bbdf963f7d6f115fc68945c207311521cdcfe9bbec49e691f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ce6985d62200c63dde2ff0fca456c568668ac34a1fb8a3d180612796be74e90 +size 33308 diff --git a/data/2025/2504_07xxx/2504.07963/images/bc8cb8556707360b013a1190d7d5c509b2a7f2bea7b60c77621658eab126a3e8.jpg b/data/2025/2504_07xxx/2504.07963/images/bc8cb8556707360b013a1190d7d5c509b2a7f2bea7b60c77621658eab126a3e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7ec9d53dc096368413be8b01434388b723abede --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/bc8cb8556707360b013a1190d7d5c509b2a7f2bea7b60c77621658eab126a3e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f59decb617b05d78e89fc2cdcd9f32e6d88e21eb4c213ffdcbb5d6b69fc7995d +size 15669 diff --git a/data/2025/2504_07xxx/2504.07963/images/c32148ab97591973dd086d1447f89537f63a1843935aa5912f270e9ed7ced30f.jpg b/data/2025/2504_07xxx/2504.07963/images/c32148ab97591973dd086d1447f89537f63a1843935aa5912f270e9ed7ced30f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6bbb33a37a37bf00145f9eb466e9b1b9b28fa5d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/c32148ab97591973dd086d1447f89537f63a1843935aa5912f270e9ed7ced30f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42111c1478e298f656268d40823db3dee8b8cdb0d86cd3a512d419d102fc4d58 +size 10143 diff --git a/data/2025/2504_07xxx/2504.07963/images/cade610c4d95320b458b117e3a2d2649793ed3224ae557aee8875f6dd565a9e8.jpg b/data/2025/2504_07xxx/2504.07963/images/cade610c4d95320b458b117e3a2d2649793ed3224ae557aee8875f6dd565a9e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84f4b2db8cba41fb35d599d32396877a3a9139b9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/cade610c4d95320b458b117e3a2d2649793ed3224ae557aee8875f6dd565a9e8.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7300055dcf008dc4584156cd639c300814e24796aa62376d27f88992e9bb7db5 +size 17556 diff --git a/data/2025/2504_07xxx/2504.07963/images/ce3d523162e2de6798738a7ada075092aa9b6d6abae010378ff834f3f0628d6c.jpg b/data/2025/2504_07xxx/2504.07963/images/ce3d523162e2de6798738a7ada075092aa9b6d6abae010378ff834f3f0628d6c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a264e37759fc304a015e6cb69b70bd54263bbc3 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/ce3d523162e2de6798738a7ada075092aa9b6d6abae010378ff834f3f0628d6c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab2ab7f4eeaa175f065739498681dc67f884adefb924f603602b3c2d9984fbff +size 81149 diff --git a/data/2025/2504_07xxx/2504.07963/images/d180a5889fe2515f2799a5337322a963bbac86597d9a728f402fb0406306f195.jpg b/data/2025/2504_07xxx/2504.07963/images/d180a5889fe2515f2799a5337322a963bbac86597d9a728f402fb0406306f195.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1aa8798b5fb74fe8ff965a2224ee71e41774367a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/d180a5889fe2515f2799a5337322a963bbac86597d9a728f402fb0406306f195.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:453a0043b5d91bdb0831b888380a641eccd67e43a820dce753c9c8a2a70a047b +size 13727 diff --git a/data/2025/2504_07xxx/2504.07963/images/d595fc36281d0c0c4ea8cd4eb1657f6509f2d51fe7437ccbfade8cdf93ea2175.jpg b/data/2025/2504_07xxx/2504.07963/images/d595fc36281d0c0c4ea8cd4eb1657f6509f2d51fe7437ccbfade8cdf93ea2175.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4a26d9ab630d648f180164bf1faf29e0e09c4aa --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/d595fc36281d0c0c4ea8cd4eb1657f6509f2d51fe7437ccbfade8cdf93ea2175.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68d0e012ff65841b3e9558dc919fdbe21acd3f5bb65355add2c7e4bb891e11c2 +size 11801 diff --git 
a/data/2025/2504_07xxx/2504.07963/images/dfa76b1e78167a2685a4cccf48e1fa4f40bc3d0764b6f9c45a96acf621b4f749.jpg b/data/2025/2504_07xxx/2504.07963/images/dfa76b1e78167a2685a4cccf48e1fa4f40bc3d0764b6f9c45a96acf621b4f749.jpg new file mode 100644 index 0000000000000000000000000000000000000000..58a7650dfb613619a538ce698d61ebbb9c73ea4f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/dfa76b1e78167a2685a4cccf48e1fa4f40bc3d0764b6f9c45a96acf621b4f749.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af8de28c4209b65c72190c4b625c8fdc8cbfad22b10deb8e66ee15cfdfec0d4b +size 48678 diff --git a/data/2025/2504_07xxx/2504.07963/images/e27a6f4ebd515a8c9e06c383f05bb0450c9b8cc87658670a50d089cdf9c075e3.jpg b/data/2025/2504_07xxx/2504.07963/images/e27a6f4ebd515a8c9e06c383f05bb0450c9b8cc87658670a50d089cdf9c075e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aeb2223a82ebbca4a583b3fd31bf12aa477ed96c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/e27a6f4ebd515a8c9e06c383f05bb0450c9b8cc87658670a50d089cdf9c075e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc3dc7200593df6632bb2d437b1ba30a7a76516b16f9194c22b066f6e049a6df +size 12758 diff --git a/data/2025/2504_07xxx/2504.07963/images/e4f49219eea9ec876a4ab556fba046171327dec81549b16dd71d6a4fecb0320f.jpg b/data/2025/2504_07xxx/2504.07963/images/e4f49219eea9ec876a4ab556fba046171327dec81549b16dd71d6a4fecb0320f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3dc7437869708f7154a0ce5b9d17d5f54c8f3874 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/e4f49219eea9ec876a4ab556fba046171327dec81549b16dd71d6a4fecb0320f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:018c079f05a856088ca8043bb82b1b8640121705a6cf13d0086ed974dcd0ac33 +size 64844 diff --git a/data/2025/2504_07xxx/2504.07963/images/e6dd0f7a09ced4341a150bac664879ce25b786ee43a5758a39d74c9586dacc1f.jpg 
b/data/2025/2504_07xxx/2504.07963/images/e6dd0f7a09ced4341a150bac664879ce25b786ee43a5758a39d74c9586dacc1f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca9faf13c928b3ecac095bd4cf2a1832d4cefe7b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/e6dd0f7a09ced4341a150bac664879ce25b786ee43a5758a39d74c9586dacc1f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fbb5375b4f32cb20758a0723ca586621d9ef6938cb87d5ab7a3ea528f36a6d9 +size 6143 diff --git a/data/2025/2504_07xxx/2504.07963/images/f0a1ba1c09e004b15deafc39cd17fed3708d0f3127aa7ce8ffe40ba17efa03a5.jpg b/data/2025/2504_07xxx/2504.07963/images/f0a1ba1c09e004b15deafc39cd17fed3708d0f3127aa7ce8ffe40ba17efa03a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acc4c6de0e5909d410045271e67c744d9ebc98a3 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/f0a1ba1c09e004b15deafc39cd17fed3708d0f3127aa7ce8ffe40ba17efa03a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01af77e4ad67c37449d6f3fef9cfbbe542041e78853c0cc0dd5511c5b966a3ef +size 15515 diff --git a/data/2025/2504_07xxx/2504.07963/images/f1a6f6462f120aca1848d9adea07e7e02916b71d19d0e7609f25d0ed0debcfc0.jpg b/data/2025/2504_07xxx/2504.07963/images/f1a6f6462f120aca1848d9adea07e7e02916b71d19d0e7609f25d0ed0debcfc0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ef2b3d0af7810d89537fb261e03975ad816dbe6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07963/images/f1a6f6462f120aca1848d9adea07e7e02916b71d19d0e7609f25d0ed0debcfc0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c9c3720aaec6b39c819618ac78d7f0c2f5f8288da38871dddb6053cd36857c6 +size 13870 diff --git a/data/2025/2504_07xxx/2504.07963/layout.json b/data/2025/2504_07xxx/2504.07963/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..4eb3459c5796269598764c4421cfeba9eefc68d3 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07963/layout.json @@ -0,0 +1,10198 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 141, + 103, + 470, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 103, + 470, + 121 + ], + "spans": [ + { + "bbox": [ + 141, + 103, + 470, + 121 + ], + "type": "text", + "content": "PixelFlow: Pixel-Space Generative Models with Flow" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 142, + 496, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 142, + 496, + 175 + ], + "spans": [ + { + "bbox": [ + 111, + 142, + 496, + 175 + ], + "type": "text", + "content": "Shoufa Chen1 Chongjian Ge1,2 Shilong Zhang1 Peize Sun1 Ping Luo1 \n1The University of Hong Kong 2Adobe" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 151, + 201, + 200, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 201, + 200, + 213 + ], + "spans": [ + { + "bbox": [ + 151, + 201, + 200, + 213 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 226, + 296, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 226, + 296, + 406 + ], + "spans": [ + { + "bbox": [ + 53, + 226, + 296, + 406 + ], + "type": "text", + "content": "We present PixelFlow, a family of image generation models that operate directly in the raw pixel space, in contrast to the predominant latent-space models. This approach simplifies the image generation process by eliminating the need for a pre-trained Variational Autoencoder (VAE) and enabling the whole model end-to-end trainable. Through efficient cascade flow modeling, PixelFlow achieves affordable computation cost in pixel space. 
It achieves an FID of 1.98 on " + }, + { + "bbox": [ + 53, + 226, + 296, + 406 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 53, + 226, + 296, + 406 + ], + "type": "text", + "content": " ImageNet class-conditional image generation benchmark. The qualitative text-to-image results demonstrate that PixelFlow excels in image quality, artistry, and semantic control. We hope this new paradigm will inspire and open up new opportunities for next-generation visual generation models. Code and models are available at https://github.com/ShoufaChen/PixelFlow." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 430, + 136, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 430, + 136, + 442 + ], + "spans": [ + { + "bbox": [ + 56, + 430, + 136, + 442 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 459, + 277, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 459, + 277, + 471 + ], + "spans": [ + { + "bbox": [ + 75, + 459, + 277, + 471 + ], + "type": "text", + "content": "Numquam ponenda est pluralitas sine necessitate." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 198, + 480, + 290, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 480, + 290, + 491 + ], + "spans": [ + { + "bbox": [ + 198, + 480, + 290, + 491 + ], + "type": "text", + "content": "William of Ockham" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 510, + 295, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 510, + 295, + 653 + ], + "spans": [ + { + "bbox": [ + 55, + 510, + 295, + 653 + ], + "type": "text", + "content": "Driven by the success of the Stable Diffusion (SD) model series [17, 46, 47, 50], latent diffusion models (LDMs) [50] have emerged as the de facto standard for generative modeling across diverse modalities, spanning image [17, 35, 45], video [7, 8, 23, 66, 69], audio [18, 39], and 3D [57, 67]. As shown in Figure 1 (a), LDMs compress raw data into a compact latent space using pre-trained Variencoders (VAEs). This compression reduces computational demands and facilitates efficient diffusion denoising. Despite their widespread success, LDMs decouple the VAE and diffusion components, hindering joint optimization and complicating holistic diagnosis." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 654, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 296, + 714 + ], + "type": "text", + "content": "An alternative approach is to implement diffusion models in the raw pixel space. While intuitive, this becomes computationally unaffordable for high-resolution images due to the substantial resources required to process per-pixel correlations. 
Considering this, prior research [20, 22, 44," + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 320, + 200, + 548, + 316 + ], + "blocks": [ + { + "bbox": [ + 320, + 200, + 548, + 316 + ], + "lines": [ + { + "bbox": [ + 320, + 200, + 548, + 316 + ], + "spans": [ + { + "bbox": [ + 320, + 200, + 548, + 316 + ], + "type": "image", + "image_path": "1e011bdb3b1072d22c1a3b15e9bc1d155d38ad1bc57863c4acfb76b601896f88.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 320, + 327, + 548, + 434 + ], + "blocks": [ + { + "bbox": [ + 352, + 317, + 525, + 327 + ], + "lines": [ + { + "bbox": [ + 352, + 317, + 525, + 327 + ], + "spans": [ + { + "bbox": [ + 352, + 317, + 525, + 327 + ], + "type": "text", + "content": "(a) Latent-based Diffusion Models (Two stages)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 320, + 327, + 548, + 434 + ], + "lines": [ + { + "bbox": [ + 320, + 327, + 548, + 434 + ], + "spans": [ + { + "bbox": [ + 320, + 327, + 548, + 434 + ], + "type": "image", + "image_path": "b214496f8b07f0bbdf963f7d6f115fc68945c207311521cdcfe9bbec49e691f4.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 434, + 517, + 444 + ], + "lines": [ + { + "bbox": [ + 350, + 434, + 517, + 444 + ], + "spans": [ + { + "bbox": [ + 350, + 434, + 517, + 444 + ], + "type": "text", + "content": "(b) Pixel-based Diffusion Models (Two stages)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 327, + 447, + 548, + 512 + ], + "blocks": [ + { + "bbox": [ + 327, + 447, + 548, + 512 + ], + "lines": [ + { + "bbox": [ + 327, + 447, + 548, + 512 + ], + "spans": [ + { + "bbox": [ + 327, + 447, + 548, + 512 + ], + "type": "image", + "image_path": "cade610c4d95320b458b117e3a2d2649793ed3224ae557aee8875f6dd565a9e8.jpg" 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 520, + 504, + 531 + ], + "lines": [ + { + "bbox": [ + 364, + 520, + 504, + 531 + ], + "spans": [ + { + "bbox": [ + 364, + 520, + 504, + 531 + ], + "type": "text", + "content": "(c) PixelFlow (End-to-end one stage)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 313, + 545, + 555, + 655 + ], + "lines": [ + { + "bbox": [ + 313, + 545, + 555, + 655 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 555, + 655 + ], + "type": "text", + "content": "Figure 1. Comparisons of Design Paradigms between latent-based diffusion models (LDMs), pixel-based diffusion models (PDMs), and PixelFlow: (a) LDMs split training into two separate stages—first independently training off-the-shell VAEs, then training diffusion models on tokens extracted from the pre-trained VAEs; (b) Previous PDMs typically train two separate models: a diffusion model on low-resolution images and an upsampler for high-resolution synthesis; (c) PixelFlow, by contrast, offers an end-to-end solution for pixel-based generation, combining both high efficiency and strong generative performance." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "content": "51, 52] has typically adopted a cascaded approach: first generating a low-resolution image, then employing additional upsamplers to produce high-quality outputs, with the low" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.07963v1 [cs.CV] 10 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "content": "resolution image serving as conditioning input, as shown in Figure 1(b). However, these cascaded methods also introduce separate networks for different stages, still limiting the benefits of end-to-end design." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 126, + 295, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 126, + 295, + 281 + ], + "spans": [ + { + "bbox": [ + 55, + 126, + 295, + 281 + ], + "type": "text", + "content": "In this work, we introduce PixelFlow, a simple but effective end-to-end framework for direct image generation in raw pixel space, without the need of separate networks like VAEs or upsamplers. As illustrated in Figure 1(c), PixelFlow uses a unified set of parameters to model multiscale samples across cascading resolutions via Flow Matching [38, 40]. At early denoising stages, when noise levels are high, PixelFlow operates on lower-resolution samples. As denoising progresses, the resolution gradually increases until it reaches the target resolution in the final stage. This progressive strategy avoids performing all denoising steps at full resolution, thereby significantly reducing the overall computational cost of the generation process." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 287, + 295, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 287, + 295, + 406 + ], + "spans": [ + { + "bbox": [ + 55, + 287, + 295, + 406 + ], + "type": "text", + "content": "During training, the cross-scale samples at different timesteps are constructed by: (1) resizing the images to successive scales and adding Gaussian noise to each scaled image; (2) interpolating between adjacent scale noisy images as model input and conducting velocity prediction. The entire model is trained end-to-end using uniformly sampled training examples from all stages. During inference, the process begins with pure Gaussian noise at the lowest resolution. The model then progressively denoises and upscales the image until the target resolution is reached." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 413, + 295, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 413, + 295, + 556 + ], + "spans": [ + { + "bbox": [ + 55, + 413, + 295, + 556 + ], + "type": "text", + "content": "We evaluated PixelFlow on both class-conditional and text-to-image generation tasks. Compared to established latent-space diffusion models [42, 45, 50], PixelFlow delivers competitive performance. For instance, on the " + }, + { + "bbox": [ + 55, + 413, + 295, + 556 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 413, + 295, + 556 + ], + "type": "text", + "content": " ImageNet class-conditional generation benchmark, PixelFlow achieves an FID of 1.98. For text-to-image generation, PixelFlow is evaluated on widely-used benchmarks, achieving 0.64 on GenEval [19] and 77.93 on DPG-Bench [26]. In addition, qualitative results in Figure 5 and Figure 6 illustrate that PixelFlow has strong visual fidelity and text-image alignment, highlighting the potential of pixel-space generation for future research." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 562, + 295, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 562, + 295, + 586 + ], + "spans": [ + { + "bbox": [ + 55, + 562, + 295, + 586 + ], + "type": "text", + "content": "The contributions of PixelFlow are summarized as in the following three points:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 591, + 295, + 711 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 56, + 591, + 295, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 591, + 295, + 627 + ], + "spans": [ + { + "bbox": [ + 56, + 591, + 295, + 627 + ], + "type": "text", + "content": "- By eliminating the need for a pre-trained VAE, we establish an end-to-end trainable image generation model in raw pixel space directly." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 628, + 295, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 628, + 295, + 662 + ], + "spans": [ + { + "bbox": [ + 56, + 628, + 295, + 662 + ], + "type": "text", + "content": "- Through cascade flow modeling from low resolution to high resolution, our model achieves affordable computation cost in both training and inference." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 663, + 295, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 663, + 295, + 711 + ], + "spans": [ + { + "bbox": [ + 56, + 663, + 295, + 711 + ], + "type": "text", + "content": "- PixelFlow obtains competitive performance in visual quality, including 1.98 FID on " + }, + { + "bbox": [ + 56, + 663, + 295, + 711 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 56, + 663, + 295, + 711 + ], + "type": "text", + "content": " ImageNet class-conditional image generation benchmark and appealing properties on text-to-image generation." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 71, + 400, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 71, + 400, + 83 + ], + "spans": [ + { + "bbox": [ + 314, + 71, + 400, + 83 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 91, + 553, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 553, + 224 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 553, + 224 + ], + "type": "text", + "content": "Latent Space Diffusion/Flow Models. Variational Autoencoders (VAEs) have become a core component in many recent generative models [16, 17, 35, 47, 48, 50, 59, 66], enabling the mapping of visual data from pixel space to a lower-dimensional, perceptually equivalent latent space. 
This compact representation facilitates more efficient training and inference. However, VAEs often compromise high-frequency details [47], leading to inevitable low-level artifacts in generated outputs. Motivated by a desire for algorithmic simplicity and fully end-to-end optimization, we forgo the VAE and operate directly in pixel space." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 242, + 553, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 242, + 553, + 445 + ], + "spans": [ + { + "bbox": [ + 313, + 242, + 553, + 445 + ], + "type": "text", + "content": "Pixel Space Diffusion/Flow Models. Early diffusion models [2, 21, 56] primarily operated directly in pixel space, aiming to capture the distributions images in a single stage. However, this approach proved both challenging and inefficient for high-resolution image generation, leading to the development of cascaded models [20, 22, 30, 52] that generate images through a sequence of stages. These cascaded models typically begin with the generation of a low-resolution image, which is subsequently upscaled by super-resolution models to achieve higher resolutions. However, the diffusion-based super-resolution process often requires starting from pure noise, conditioned on lower-resolution outputs, resulting in a time-consuming and inefficient generation process. Additionally, training these models in isolated stages hinders end-to-end optimization and necessitates carefully designed strategies to ensure the super-resolution stages." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 447, + 553, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 447, + 553, + 590 + ], + "spans": [ + { + "bbox": [ + 313, + 447, + 553, + 590 + ], + "type": "text", + "content": "Furthermore, recent advancements in pixel-space generation have introduced innovative architectures. 
Simple Diffusion [24, 25] proposes a streamlined diffusion framework for high-resolution image synthesis, achieving strong performance on ImageNet through adjustments of model architecture and noise schedules. FractalGen [37] constructs fractal generative models by recursively invoking atomic generative modules, resulting in self-similar architectures that demonstrate strong performance in pixel-by-pixel image generation. TarFlow [68] presents a Transformer-based normalizing flow architecture capable of directly modeling and generating pixels." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 603, + 380, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 603, + 380, + 615 + ], + "spans": [ + { + "bbox": [ + 314, + 603, + 380, + 615 + ], + "type": "text", + "content": "3. PixelFlow" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 624, + 471, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 624, + 471, + 636 + ], + "spans": [ + { + "bbox": [ + 313, + 624, + 471, + 636 + ], + "type": "text", + "content": "3.1. Preliminary: Flow Matching" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 641, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 553, + 713 + ], + "type": "text", + "content": "The Flow Matching algorithm [1, 38, 40] progressively transforms a sample from a prior distribution, which is typically a standard normal distribution, to the target data distribution. 
This is accomplished by defining a forward process consisting of a sequence of linear paths that directly connect samples from the prior distribution to corresponding" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 70, + 294, + 205 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 294, + 205 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 294, + 205 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 294, + 205 + ], + "type": "image", + "image_path": "6ba2736e7f8ebff671362f1038d26b7f985246d726e9b861a937b2791aafbb24.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 213, + 296, + 280 + ], + "lines": [ + { + "bbox": [ + 55, + 213, + 296, + 280 + ], + "spans": [ + { + "bbox": [ + 55, + 213, + 296, + 280 + ], + "type": "text", + "content": "Figure 2. PixelFlow for cascaded image generation from pixel space. We partition the entire generation procedure into series resolution stages. At the beginning of each resolution stage, we upscale the relatively noisy results from the preceding stage and use them as the starting point for the current stage. Consequently, as the resolution enhances, more refined samples can be obtained." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 312, + 296, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 312, + 296, + 373 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 296, + 373 + ], + "type": "text", + "content": "samples in the target distribution. 
During training, a training example is constructed by first sampling a target sample " + }, + { + "bbox": [ + 55, + 312, + 296, + 373 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_1" + }, + { + "bbox": [ + 55, + 312, + 296, + 373 + ], + "type": "text", + "content": ", drawing noise " + }, + { + "bbox": [ + 55, + 312, + 296, + 373 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_0 \\sim \\mathcal{N}(0, 1)" + }, + { + "bbox": [ + 55, + 312, + 296, + 373 + ], + "type": "text", + "content": " from the standard normal distribution, and selecting a timestep " + }, + { + "bbox": [ + 55, + 312, + 296, + 373 + ], + "type": "inline_equation", + "content": "t \\in [0, 1]" + }, + { + "bbox": [ + 55, + 312, + 296, + 373 + ], + "type": "text", + "content": ". The training example is then defined through a linear interpolation:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 122, + 384, + 295, + 396 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 384, + 295, + 396 + ], + "spans": [ + { + "bbox": [ + 122, + 384, + 295, + 396 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {t} = t \\cdot \\mathbf {x} _ {1} + (1 - t) \\cdot \\mathbf {x} _ {0} \\tag {1}", + "image_path": "2ef03556144a3f07df0d650822db97bd860a68224ee0967d14ce8cfd30a207b3.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 408, + 296, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 408, + 296, + 456 + ], + "spans": [ + { + "bbox": [ + 55, + 408, + 296, + 456 + ], + "type": "text", + "content": "The model is trained to approximate the velocity defined by an ordinary differential equation (ODE), " + }, + { + "bbox": [ + 55, + 408, + 296, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_t = \\frac{d\\mathbf{x}_t}{dt}" + }, + { + "bbox": [ + 55, + 408, + 296, + 456 + ], + "type": "text", + "content": ", enabling it to effectively guide the transformation from the intermediate 
sample " + }, + { + "bbox": [ + 55, + 408, + 296, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 55, + 408, + 296, + 456 + ], + "type": "text", + "content": " to the real data sample " + }, + { + "bbox": [ + 55, + 408, + 296, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_1" + }, + { + "bbox": [ + 55, + 408, + 296, + 456 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 457, + 296, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 457, + 296, + 541 + ], + "spans": [ + { + "bbox": [ + 55, + 457, + 296, + 541 + ], + "type": "text", + "content": "A notable advantage of Flow Matching is its ability to interpolate between two arbitrary distributions, not restricted to using only a standard Gaussian as the source domain. Consequently, in image generation tasks, Flow Matching extends beyond noise-to-image scenarios and can be effectively employed for diverse applications such as image-to-image translation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 552, + 257, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 552, + 257, + 565 + ], + "spans": [ + { + "bbox": [ + 55, + 552, + 257, + 565 + ], + "type": "text", + "content": "3.2. Multi-Scale Generation in Pixel Space" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "text", + "content": "PixelFlow generates images by progressively increasing their resolution through a multistage denoising process. 
To enable this, we construct a multi-scale representation of the target image " + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_1" + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "text", + "content": " by recursively downsampling it by a factor of 2 at each scale. As illustrated in Figure 2, PixelFlow divides the image generation process into " + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "text", + "content": " stages. Each stage " + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "inline_equation", + "content": "s\\in 0,1,\\dots,S - 1" + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "text", + "content": " operates over a time interval defined by the start and end states " + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "inline_equation", + "content": "(\\mathbf{xt}_0^s,\\mathbf{xt}_1^s)" + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "text", + "content": ". In the degenerate case where " + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "inline_equation", + "content": "S = 1" + }, + { + "bbox": [ + 55, + 570, + 296, + 714 + ], + "type": "text", + "content": ", PixelFlow reduces to a standard single-stage flow matching approach for image generation, similar to recent works [17, 42], but crucially operates in pixel space rather than latent space." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 555, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 95 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 95 + ], + "type": "text", + "content": "For each stage " + }, + { + "bbox": [ + 313, + 72, + 555, + 95 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 313, + 72, + 555, + 95 + ], + "type": "text", + "content": ", we define the starting and ending states as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 322, + 105, + 555, + 119 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 105, + 555, + 119 + ], + "spans": [ + { + "bbox": [ + 322, + 105, + 555, + 119 + ], + "type": "interline_equation", + "content": "\\text {S t a r t :} \\quad \\mathbf {x} _ {t _ {0} ^ {s}} = t _ {0} ^ {s} \\cdot \\operatorname {U p} \\left(\\operatorname {D o w n} \\left(\\mathbf {x} _ {1}, 2 ^ {s + 1}\\right)\\right) + \\left(1 - t _ {0} ^ {s}\\right) \\cdot \\epsilon \\tag {2}", + "image_path": "e6dd0f7a09ced4341a150bac664879ce25b786ee43a5758a39d74c9586dacc1f.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 325, + 121, + 555, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 121, + 555, + 135 + ], + "spans": [ + { + "bbox": [ + 325, + 121, + 555, + 135 + ], + "type": "interline_equation", + "content": "\\text {E n d}: \\quad \\mathbf {x} _ {t _ {1} ^ {s}} = t _ {1} ^ {s} \\cdot \\operatorname {D o w n} \\left(\\mathbf {x} _ {1}, 2 ^ {s}\\right) + \\left(1 - t _ {1} ^ {s}\\right) \\cdot \\epsilon , \\tag {3}", + "image_path": "49e59133f35898f9f0b75a00267c4bcc31480d8106373d9eec10a3df2fcebdbb.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 144, + 554, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 144, + 554, + 191 + ], + "spans": [ + { + "bbox": [ + 313, + 144, + 554, + 191 + ], + "type": 
"text", + "content": "where " + }, + { + "bbox": [ + 313, + 144, + 554, + 191 + ], + "type": "inline_equation", + "content": "\\text{Down}(\\cdot)" + }, + { + "bbox": [ + 313, + 144, + 554, + 191 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 144, + 554, + 191 + ], + "type": "inline_equation", + "content": "\\text{Up}(\\cdot)" + }, + { + "bbox": [ + 313, + 144, + 554, + 191 + ], + "type": "text", + "content": " denote the downsampling and upsampling operations, respectively. Unless otherwise stated, we adopt bilinear interpolation for downsampling and nearest neighbor for upsampling." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 192, + 554, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 192, + 554, + 226 + ], + "spans": [ + { + "bbox": [ + 313, + 192, + 554, + 226 + ], + "type": "text", + "content": "To train the model, we sample intermediate representations by linearly interpolating between the start and end states:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 372, + 236, + 555, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 236, + 555, + 251 + ], + "spans": [ + { + "bbox": [ + 372, + 236, + 555, + 251 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {t _ {\\tau} ^ {s}} = \\tau \\cdot \\mathbf {x} _ {t _ {1} ^ {s}} + (1 - \\tau) \\cdot \\mathbf {x} _ {t _ {0} ^ {s}}, \\tag {4}", + "image_path": "2aa2c91c088d596f217bb74d209a97ca363f7b3f546d28a8f2151e1737d538ec.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 259, + 554, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 259, + 554, + 287 + ], + "spans": [ + { + "bbox": [ + 313, + 259, + 554, + 287 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 259, + 554, + 287 + ], + "type": "inline_equation", + "content": "\\tau = \\frac{t - t_0^s}{t_1^s - t_0^s}" + }, + { + "bbox": [ + 313, + 259, + 554, + 
287 + ], + "type": "text", + "content": " is the rescaled timestep [29, 65] within the " + }, + { + "bbox": [ + 313, + 259, + 554, + 287 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 313, + 259, + 554, + 287 + ], + "type": "text", + "content": "-th stage." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 287, + 554, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 287, + 554, + 323 + ], + "spans": [ + { + "bbox": [ + 313, + 287, + 554, + 323 + ], + "type": "text", + "content": "Then our objective is to train a model " + }, + { + "bbox": [ + 313, + 287, + 554, + 323 + ], + "type": "inline_equation", + "content": "\\mu_{\\theta}(\\cdot)" + }, + { + "bbox": [ + 313, + 287, + 554, + 323 + ], + "type": "text", + "content": " to predict the velocity " + }, + { + "bbox": [ + 313, + 287, + 554, + 323 + ], + "type": "inline_equation", + "content": "\\mu_{\\theta}(\\mathbf{x}_{t_{\\tau}^{s},\\tau})" + }, + { + "bbox": [ + 313, + 287, + 554, + 323 + ], + "type": "text", + "content": " with target as " + }, + { + "bbox": [ + 313, + 287, + 554, + 323 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_t = \\mathbf{x}_{t_1^s} - \\mathbf{x}_{t_0^s}" + }, + { + "bbox": [ + 313, + 287, + 554, + 323 + ], + "type": "text", + "content": ". We use the mean squared error (MSE) loss, formally represented as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 368, + 331, + 554, + 349 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 368, + 331, + 554, + 349 + ], + "spans": [ + { + "bbox": [ + 368, + 331, + 554, + 349 + ], + "type": "interline_equation", + "content": "\\left. 
\\mathbb {E} _ {s, t, \\left(\\mathbf {x} _ {t _ {1} ^ {s}}, \\mathbf {x} _ {t _ {1} ^ {s}}\\right)} \\right\\rvert \\left\\| \\mu_ {\\theta} \\left(\\mathbf {x} _ {t _ {\\tau} ^ {s}}, \\tau\\right) - \\mathbf {v} _ {t} \\right\\| ^ {2} \\tag {5}", + "image_path": "930b73d7ef35d23ff32bbb26a02538951982cb4c56d34618fa2400a3735f0f6d.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 356, + 429, + 368 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 356, + 429, + 368 + ], + "spans": [ + { + "bbox": [ + 314, + 356, + 429, + 368 + ], + "type": "text", + "content": "3.3. Model Architecture" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 373, + 554, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 373, + 554, + 456 + ], + "spans": [ + { + "bbox": [ + 313, + 373, + 554, + 456 + ], + "type": "text", + "content": "We instantiate " + }, + { + "bbox": [ + 313, + 373, + 554, + 456 + ], + "type": "inline_equation", + "content": "\\mu_{\\theta}(\\cdot)" + }, + { + "bbox": [ + 313, + 373, + 554, + 456 + ], + "type": "text", + "content": " using a Transformer-based architecture [62], chosen for its simplicity, scalability, and effectiveness in generative modeling. Specifically, our implementation is based on the standard Diffusion Transformer (DiT) [45], employing XL-scale configurations across all experiments. To better align with the PixelFlow framework, we introduce several modifications, as detailed below." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 472, + 555, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 472, + 555, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 472, + 555, + 604 + ], + "type": "text", + "content": "Patchify. 
Following the Vision Transformer (ViT) design [15, 45], the first layer of PixelFlow is a patch embedding layer, which converts the spatial representation of the input image into a 1D sequence of tokens via a linear projection. In contrast to prior latent transformers [17, 42, 45] that operate on VAE-encoded latents, PixelFlow directly tokenizes raw pixel inputs. To support efficient attention across multiple resolutions within a batch, we apply a sequence packing strategy [11], concatenating flattened token sequences of varying lengths—corresponding to different resolutions—along the sequence dimension." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 617, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 714 + ], + "type": "text", + "content": "RoPE. After patchfying, we replace the original sincos positional encoding [45] with RoPE [58] to better handle varying image resolutions. RoPE has shown strong performance in enabling length extrapolation, particularly in large language models. To adapt it for 2D image data, we apply 2D-RoPE by independently applying 1D-RoPE to the height and width dimensions, with each dimension occupying half of the hidden state." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 70, + 553, + 178 + ], + "blocks": [ + { + "bbox": [ + 60, + 70, + 553, + 178 + ], + "lines": [ + { + "bbox": [ + 60, + 70, + 553, + 178 + ], + "spans": [ + { + "bbox": [ + 60, + 70, + 553, + 178 + ], + "type": "image", + "image_path": "ce3d523162e2de6798738a7ada075092aa9b6d6abae010378ff834f3f0628d6c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 192, + 555, + 215 + ], + "lines": [ + { + "bbox": [ + 55, + 192, + 555, + 215 + ], + "spans": [ + { + "bbox": [ + 55, + 192, + 555, + 215 + ], + "type": "text", + "content": "Figure 3. Visualization of intermediate result of cascaded stages. We extract the intermediate results from each of the four stages for direct visualization. We observed a clear denoising process at various resolution stages." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 236, + 296, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 236, + 296, + 331 + ], + "spans": [ + { + "bbox": [ + 55, + 236, + 296, + 331 + ], + "type": "text", + "content": "Resolution Embedding. Since PixelFlow operates across multiple resolutions using a shared set of model parameters, we introduce an additional resolution embedding to distinguish between resolutions. Specifically, we use the absolute resolution of the feature map after patch embedding as a conditional signal. 
This signal is encoded using sinusoidal position embedding [62] and added to the timestep embedding before being passed into the model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 350, + 296, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 350, + 296, + 482 + ], + "spans": [ + { + "bbox": [ + 55, + 350, + 296, + 482 + ], + "type": "text", + "content": "Text-to-Image Generation. While class-conditional image generation typically integrates conditioning information through adaptive layer normalization (adaLN)[45], we extend PixelFlow to support text-to-image generation by introducing a cross-attention layer after each self-attention layer within every Transformer block [6, 7]. This design allows the model to effectively align visual features with the textual input at every stage of the generation process. Following recent work [8, 59], we adopt the Flan-T5-XL language model [10] to extract rich text embeddings, which serve as conditioning signals throughout the network." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 491, + 187, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 491, + 187, + 504 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 187, + 504 + ], + "type": "text", + "content": "3.4. Training and Inference" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 509, + 295, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 509, + 295, + 581 + ], + "spans": [ + { + "bbox": [ + 55, + 509, + 295, + 581 + ], + "type": "text", + "content": "To facilitate efficient training, we uniformly sample training examples from all resolution stages using the interpolation scheme defined in Equation (4). Additionally, we employ the sequence packing technique [11], which enables joint training of scale-variant examples within a single minibatch, improving both efficiency and scalability." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 582, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 296, + 714 + ], + "type": "text", + "content": "During inference, the generation process begins with pure Gaussian noise at the lowest resolution and progressively transitions to higher resolutions through multiple stages. Within each resolution stage, we apply standard flow-based sampling, using either the Euler discrete sampler [17] or the Dopri5 solver, depending on the desired trade-off between speed and accuracy. To ensure smooth and coherent transitions across scales, we adopt an ronoising strategy [29, 60], which effectively mitigates the jumping point issue [4] often observed in multi-scale generation pipelines." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 235, + 395, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 235, + 395, + 248 + ], + "spans": [ + { + "bbox": [ + 313, + 235, + 395, + 248 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 255, + 555, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 255, + 555, + 327 + ], + "spans": [ + { + "bbox": [ + 313, + 255, + 555, + 327 + ], + "type": "text", + "content": "In this section, we first detail our experimental setup in Sec. 4.1. Subsequently, we analyze key components of our approach, including model design (Sec. 4.2) and inference configurations (Sec. 4.3). Finally, we benchmark PixelFlow against state-of-the-art methods on class- (Sec. 4.4) and text-to-image (Sec. 4.5) generation tasks." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 333, + 430, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 333, + 430, + 346 + ], + "spans": [ + { + "bbox": [ + 313, + 333, + 430, + 346 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 350, + 554, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 350, + 554, + 445 + ], + "spans": [ + { + "bbox": [ + 313, + 350, + 554, + 445 + ], + "type": "text", + "content": "We evaluate PixelFlow for class-conditional image generation on the ImageNet-1K [12] dataset. Unless stated otherwise, we train PixelFlow at " + }, + { + "bbox": [ + 313, + 350, + 554, + 445 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 350, + 554, + 445 + ], + "type": "text", + "content": " resolution. All models are trained using the AdamW optimizer [32, 41] with a constant learning rate of " + }, + { + "bbox": [ + 313, + 350, + 554, + 445 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 350, + 554, + 445 + ], + "type": "text", + "content": ". Performance is primarily measured by Fréchet Inception Distance (FID) using the standard evaluation toolkit1. We also report Inception Score (IS) [53], sFID [43], and Precision/Recall [33]." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 446, + 554, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 446, + 554, + 517 + ], + "spans": [ + { + "bbox": [ + 313, + 446, + 554, + 517 + ], + "type": "text", + "content": "For text-conditional image generation, we progressively train PixelFlow from " + }, + { + "bbox": [ + 313, + 446, + 554, + 517 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 446, + 554, + 517 + ], + "type": "text", + "content": " up to " + }, + { + "bbox": [ + 313, + 446, + 554, + 517 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 313, + 446, + 554, + 517 + ], + "type": "text", + "content": " resolution. We include qualitative comparisons with current start-of-the-art generative models, along with quantitative assessments on popular benchmarks such as T2I-CompBench [27], GenEval [19], and DPG-Bench [26]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 523, + 402, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 523, + 402, + 536 + ], + "spans": [ + { + "bbox": [ + 313, + 523, + 402, + 536 + ], + "type": "text", + "content": "4.2. Model Design" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 541, + 554, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 541, + 554, + 672 + ], + "spans": [ + { + "bbox": [ + 313, + 541, + 554, + 672 + ], + "type": "text", + "content": "Kickoff sequence length. In principle, PixelFlow can be trained to progressively increase resolution from very low resolution (e.g., " + }, + { + "bbox": [ + 313, + 541, + 554, + 672 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 541, + 554, + 672 + ], + "type": "text", + "content": ") up to the target resolution. 
However, this approach is inefficient in practice, as tokens at extremely low resolutions convey limited meaningful information. Furthermore, allocating excessive timesteps to very short sequences underutilizes the computational capacity of modern GPUs, resulting in decreased model FLOPS utilization. Therefore, we explore how varying the resolution at which image generation begins, which we call kickoff image resolution, impacts overall performance." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 673, + 554, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 673, + 554, + 696 + ], + "spans": [ + { + "bbox": [ + 313, + 673, + 554, + 696 + ], + "type": "text", + "content": "For our transformer-based backbone, the number of tokens involved in attention operations is determined by the" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 325, + 703, + 533, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 703, + 533, + 712 + ], + "spans": [ + { + "bbox": [ + 325, + 703, + 533, + 712 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 71, + 292, + 126 + ], + "blocks": [ + { + "bbox": [ + 59, + 71, + 292, + 126 + ], + "lines": [ + { + "bbox": [ + 59, + 71, + 292, + 126 + ], + "spans": [ + { + "bbox": [ + 59, + 71, + 292, + 126 + ], + "type": "table", + "html": "
kickoff seq. len.FID ↓sFID ↓IS ↑Precision ↑Recall ↑
32×323.346.1184.750.780.57
8×83.216.2378.500.780.56
2×23.496.4567.810.780.54
", + "image_path": "8beded7c1a3aa975ea063d812dd598403ae35915c9c6a44cc8f67d5cc82ffdac.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 59, + 182, + 292, + 342 + ], + "blocks": [ + { + "bbox": [ + 55, + 135, + 295, + 169 + ], + "lines": [ + { + "bbox": [ + 55, + 135, + 295, + 169 + ], + "spans": [ + { + "bbox": [ + 55, + 135, + 295, + 169 + ], + "type": "text", + "content": "Table 1. Effect of kickoff sequence length. All models are trained with 600k iterations on ImageNet-1K. Patch size is " + }, + { + "bbox": [ + 55, + 135, + 295, + 169 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 55, + 135, + 295, + 169 + ], + "type": "text", + "content": " and target image resolution is " + }, + { + "bbox": [ + 55, + 135, + 295, + 169 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 55, + 135, + 295, + 169 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 182, + 292, + 342 + ], + "lines": [ + { + "bbox": [ + 59, + 182, + 292, + 342 + ], + "spans": [ + { + "bbox": [ + 59, + 182, + 292, + 342 + ], + "type": "table", + "html": "
patch sizeFID ↓sFID ↓IS ↑Precision ↑Recall ↑speed†
target res.64×64; kickoff seq.len.2×2; 600K iters
2×23.496.4567.810.780.541.28
4×43.415.5268.830.770.560.58
target res.256×256; kickoff seq.len.2×2; 100K iters
2×228.506.4047.370.580.5330.88
4×433.177.7142.290.570.527.31
8×847.509.6331.190.450.503.96
target res.256×256; kickoff seq.len.2×2; 1600K iters; EMA
4×42.815.48251.790.820.557.31
8×84.655.42195.500.790.543.96
", + "image_path": "e4f49219eea9ec876a4ab556fba046171327dec81549b16dd71d6a4fecb0320f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "spans": [ + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "text", + "content": "raw image resolution and the patch size. In this experiment, we maintain a consistent patch size of " + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "text", + "content": " [45], making the kickoff sequence length directly dependent on the kickoff image resolution. Specifically, we evaluate three kickoff sequence length—" + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "inline_equation", + "content": "8 \\times 8" + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "text", + "content": " while keeping the target resolution fixed at " + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "text", + "content": ". Notably, the " + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 55, + 449, + 295, + 544 + ], + "type": "text", + "content": " setting represents a vanilla pixel-based approach without cascading across resolutions." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "text", + "content": "As shown in Table 1, among these configurations, the " + }, + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "inline_equation", + "content": "8 \\times 8" + }, + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "text", + "content": " kickoff sequence length achieves comparable or even slightly improved FID compared to the " + }, + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "text", + "content": " baseline. This suggests that initiating generation from an appropriately smaller resolution and progressively scaling up can maintain generation quality while improving computational efficiency by allocating fewer computations to the largest resolution stage. Conversely, reducing the kickoff sequence length further to " + }, + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "text", + "content": " results in a performance degradation, likely because tokens at extremely low resolutions provide limited useful information and insufficient guidance for subsequent generation steps. Taking into account both generation quality and computational efficiency, we therefore adopt " + }, + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "inline_equation", + "content": "8 \\times 8" + }, + { + "bbox": [ + 55, + 545, + 295, + 714 + ], + "type": "text", + "content": " as our default kickoff sequence length." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 315, + 71, + 555, + 137 + ], + "blocks": [ + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "lines": [ + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "spans": [ + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "text", + "content": "Table 2. Effect of patch size. All models have a kickoff sequence length of " + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "text", + "content": ". Upper: target resolution of " + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "text", + "content": "; Middle: target resolution of " + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "text", + "content": " resolution, training with 100K iterations due to computational constraints of patch size " + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "text", + "content": "; Bottom: Extended training to 1600K iterations at " + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 349, + 295, + 426 + ], + "type": "text", + "content": " resolution.†Speed measured as number of seconds per sample on a single GPU with a batchsize of 50." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 71, + 555, + 137 + ], + "lines": [ + { + "bbox": [ + 315, + 71, + 555, + 137 + ], + "spans": [ + { + "bbox": [ + 315, + 71, + 555, + 137 + ], + "type": "table", + "html": "
stepFID ↓sFID ↓IS ↑Precision ↑Recall ↑
103.395.98255.270.800.54
202.535.53272.130.820.56
302.515.82274.920.820.56
402.556.58272.680.810.56
", + "image_path": "7e57ec6108ab75d81d9644abadf0d757a0102e7a9fb3debaeb9465e0877cf73d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 315, + 163, + 555, + 207 + ], + "blocks": [ + { + "bbox": [ + 315, + 139, + 555, + 159 + ], + "lines": [ + { + "bbox": [ + 315, + 139, + 555, + 159 + ], + "spans": [ + { + "bbox": [ + 315, + 139, + 555, + 159 + ], + "type": "text", + "content": "(a) Effect of number of steps per stage. CFG is a global constant value 1.50, sample function is Euler." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 163, + 555, + 207 + ], + "lines": [ + { + "bbox": [ + 315, + 163, + 555, + 207 + ], + "spans": [ + { + "bbox": [ + 315, + 163, + 555, + 207 + ], + "type": "table", + "html": "
solverFID ↓sFID ↓IS ↑Precision ↑Recall ↑
Euler2.515.82274.920.820.56
Dopri52.435.38282.200.830.56
", + "image_path": "bc8cb8556707360b013a1190d7d5c509b2a7f2bea7b60c77621658eab126a3e8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 315, + 242, + 555, + 287 + ], + "blocks": [ + { + "bbox": [ + 314, + 209, + 555, + 238 + ], + "lines": [ + { + "bbox": [ + 314, + 209, + 555, + 238 + ], + "spans": [ + { + "bbox": [ + 314, + 209, + 555, + 238 + ], + "type": "text", + "content": "(b) Effect of sample function. CFG is a global constant value 1.50, the number of steps per stage is 30 in Euler, the absolute tolerance is 1e-6 in Dopri5." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 242, + 555, + 287 + ], + "lines": [ + { + "bbox": [ + 315, + 242, + 555, + 287 + ], + "spans": [ + { + "bbox": [ + 315, + 242, + 555, + 287 + ], + "type": "table", + "html": "
cfg schedulecfg max valueFID ↓IS ↑
global constant1.502.43282.2
stage-wise constant2.401.98282.1
", + "image_path": "6cbd33b515a21068b5fcb4146239d04f29be501e2f8283f26fcd92f989d561aa.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 288, + 555, + 308 + ], + "lines": [ + { + "bbox": [ + 314, + 288, + 555, + 308 + ], + "spans": [ + { + "bbox": [ + 314, + 288, + 555, + 308 + ], + "type": "text", + "content": "(c) Effect of classifier-free guidance (CFG) setting. Sample function is Dopri5 with absolute tolerance 1e-6." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 320, + 554, + 354 + ], + "lines": [ + { + "bbox": [ + 313, + 320, + 554, + 354 + ], + "spans": [ + { + "bbox": [ + 313, + 320, + 554, + 354 + ], + "type": "text", + "content": "Table 3. Inference Setting. The best performance is obtained by CFG step-wise constant with maximum value 2.40 and Dopri5 sample function." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "content": "Patch size. Next, we investigate the impact of patch size on model performance while maintaining a kickoff sequence length of " + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "content": ". 
Initially, we experiment with a target resolution of " + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "content": " and compare two patch sizes—" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "inline_equation", + "content": "4 \\times 4" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "content": "—with results presented in the upper section of Table 2. We observe that PixelFlow achieves very similar performance across these two settings, with the " + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "inline_equation", + "content": "4 \\times 4" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "content": " patch slightly outperforming the " + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "content": " patch on four out of five evaluation metrics. Furthermore, using a patch size of " + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "inline_equation", + "content": "4 \\times 4" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "content": " eliminates the highest-resolution stage required by the " + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 313, + 376, + 555, + 508 + ], + "type": "text", + "content": " patch size configuration, thus improving efficiency." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "spans": [ + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "text", + "content": "When scaling to a larger target resolution (i.e., " + }, + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "text", + "content": "), employing a patch size of " + }, + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "text", + "content": " becomes computationally infeasible due to substantial resource demands, limiting our experiments to only 100K training iterations (middle section of Table 2). This constraint necessitates adopting larger patch sizes. Although increasing the patch size further to " + }, + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "inline_equation", + "content": "8 \\times 8" + }, + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "text", + "content": " significantly enhances computational efficiency, it leads to a noticeable drop in performance quality. Moreover, this performance gap persists even after extended training (1600K iterations), as shown in the bottom section of Table 2. Considering both generation quality and computational cost, we therefore select a patch size of " + }, + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "inline_equation", + "content": "4 \\times 4" + }, + { + "bbox": [ + 313, + 508, + 556, + 663 + ], + "type": "text", + "content": " as our default setting." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 671, + 426, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 671, + 426, + 684 + ], + "spans": [ + { + "bbox": [ + 313, + 671, + 426, + 684 + ], + "type": "text", + "content": "4.3. Inference Schedule" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "content": "In Table 3, we provide a detailed analysis of the inference configuration space, including the number of inference" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 71, + 555, + 382 + ], + "blocks": [ + { + "bbox": [ + 56, + 71, + 555, + 382 + ], + "lines": [ + { + "bbox": [ + 56, + 71, + 555, + 382 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 555, + 382 + ], + "type": "image", + "image_path": "0e25921f60260761bdf0b391aa1492db1141e06144be12d09082ff2e7b581e9c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 90, + 390, + 518, + 401 + ], + "lines": [ + { + "bbox": [ + 90, + 390, + 518, + 401 + ], + "spans": [ + { + "bbox": [ + 90, + 390, + 518, + 401 + ], + "type": "text", + "content": "Figure 4. Qualitative results of class-conditional image generation of PixelFlow. 
All images are " + }, + { + "bbox": [ + 90, + 390, + 518, + 401 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 90, + 390, + 518, + 401 + ], + "type": "text", + "content": " resolution." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 420, + 295, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 420, + 295, + 445 + ], + "spans": [ + { + "bbox": [ + 55, + 420, + 295, + 445 + ], + "type": "text", + "content": "steps at each resolution stage, the choice of ODE solver, and the scheduling of classifier-free guidance (CFG)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 459, + 295, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 459, + 295, + 543 + ], + "spans": [ + { + "bbox": [ + 55, + 459, + 295, + 543 + ], + "type": "text", + "content": "Number of sample steps. In Table 3a, we evaluate the impact of the number of inference steps per resolution stage on generation quality. As the number of steps increases, we observe consistent improvements in FID, sFID, and IS, with the best overall performance achieved at 30 steps. Beyond this point, gains saturate and even slightly decline, indicating diminishing returns." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 543, + 295, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 543, + 295, + 638 + ], + "spans": [ + { + "bbox": [ + 55, + 543, + 295, + 638 + ], + "type": "text", + "content": "A notable advantage of PixelFlow is its flexibility in assigning different numbers of sampling steps to each resolution stage during inference. This adaptive configuration allows fine-grained control over the sampling process, enabling performance-efficiency trade-offs. 
Moving beyond a uniform setting and exploring more granular stage-specific step allocations holds the potential for further performance enhancements." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 653, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 295, + 713 + ], + "type": "text", + "content": "ODE Solver. We further investigate the effect of the ODE solver type on generation quality. As shown in Table 3b, we compare the first-order Euler solver with the adaptive higher-order Dormand-Prince (Dopri5) solver [14]. The results indicate that Dopri5 consistently outperforms Euler" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 420, + 555, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 420, + 555, + 504 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 555, + 504 + ], + "type": "text", + "content": "across most evaluation metrics, achieving lower FID and sFID scores, a higher Inception Score, and slightly better precision, while maintaining similar recall. This demonstrates that more accurate and adaptive solvers, such as Dopri5, can better capture the generative dynamics, leading to higher-quality samples—though often with increased computational cost." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 521, + 556, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 556, + 641 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 556, + 641 + ], + "type": "text", + "content": "CFG Schedule. 
Inspired by the recent process [5, 34, 63], we propose a stage-wise CFG schedule, where different stages apply different CFG values, and from the early stage to the later stage, the value increases from 1 to " + }, + { + "bbox": [ + 313, + 521, + 556, + 641 + ], + "type": "inline_equation", + "content": "\\mathrm{CFG}_{\\mathrm{max}}" + }, + { + "bbox": [ + 313, + 521, + 556, + 641 + ], + "type": "text", + "content": ". In the condition of 4 stages, we find that 0, 1/6, 2/3 and 1 of the " + }, + { + "bbox": [ + 313, + 521, + 556, + 641 + ], + "type": "inline_equation", + "content": "(\\mathrm{CFG}_{\\mathrm{max}} - 1)" + }, + { + "bbox": [ + 313, + 521, + 556, + 641 + ], + "type": "text", + "content": " give the best FID performance. The comparison between global constant CFG and stage-wise CFG is shown in Table 3c, in which we search the best CFG value for each method. Our proposed stage-wise CFG boosts the FID performance from 2.43 to 1.98." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 659, + 515, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 659, + 515, + 672 + ], + "spans": [ + { + "bbox": [ + 313, + 659, + 515, + 672 + ], + "type": "text", + "content": "4.4. Comparison on ImageNet Benchmark" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "content": "In Table 4, we compare PixelFlow with both latent-based and pixel-based image generation models on the ImageNet " + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "content": " benchmark. 
PixelFlow achieves an FID of 1.98," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 741 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 70, + 293, + 312 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 293, + 312 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 293, + 312 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 293, + 312 + ], + "type": "table", + "html": "
ModelFID ↓ sFID ↓ IS ↑Precision ↑Recall ↑
Latent Space
LDM-4-G [50]3.60-247.70.870.48
DiT-XL/2 [45]2.274.60278.20.830.57
SiT-XL/2 [42]2.064.49277.50.830.59
Pixel Space
ADM-G [13]4.595.25186.70.820.52
ADM-U [13]3.946.14215.80.830.53
CDM [22]4.88-158.7--
RIN [9, 28]3.42-182.0--
SD, U-ViT-L [24]2.77-211.8--
MDM [20]3.51----
StyleGAN-XL [54]2.304.02265.10.780.53
VDM++ [31]2.12-267.7--
PaGoDA [30]1.56-259.6-0.59
SiD2 [25]1.38----
JetFormer [61]6.64--0.690.56
FractalMAR-H [37]6.15-348.90.810.46
PixelFlow (ours)1.985.83282.10.810.60
", + "image_path": "0e303bdb1b93ae43dcdd2c8a5980195c88032b7943e9c9c23810e68612c366c0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 376, + 295, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 376, + 295, + 460 + ], + "spans": [ + { + "bbox": [ + 54, + 376, + 295, + 460 + ], + "type": "text", + "content": "representing highly competitive performance relative to state-of-the-art latent-space methods. For instance, it outperforms LDM [50] (FID 3.60), DiT [45] (FID 2.27), and SiT [42] (FID 2.06), while achieving comparable IS and recall scores. These results highlight the effectiveness of our design, suggesting that PixelFlow can serve as a strong prototype for high-quality visual generation systems." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 461, + 296, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 461, + 296, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 461, + 296, + 521 + ], + "type": "text", + "content": "Compared with recent pixel-based models, PixelFlow achieves superior sample quality. It notably outperforms FractalMAR-H [37], and also delivers competitive or better results than strong baselines like ADM-U [13], SiD2 [25], and VDM++ [31]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 522, + 296, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 522, + 296, + 571 + ], + "spans": [ + { + "bbox": [ + 55, + 522, + 296, + 571 + ], + "type": "text", + "content": "We visualize class-conditional image generation of PixelFlow at " + }, + { + "bbox": [ + 55, + 522, + 296, + 571 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 522, + 296, + 571 + ], + "type": "text", + "content": " resolution in Figure 4. We can observe our model is able to generate images of high visual quality across a wide range of classes." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 586, + 200, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 586, + 200, + 600 + ], + "spans": [ + { + "bbox": [ + 55, + 586, + 200, + 600 + ], + "type": "text", + "content": "4.5. Text-to-Image Generation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 605, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 605, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 54, + 605, + 295, + 713 + ], + "type": "text", + "content": "Settings. We adopt a two-stage training strategy for text-to-image generation of PixelFlow. First, the model is initialized with an ImageNet-pretrained checkpoint at a resolution of " + }, + { + "bbox": [ + 54, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 54, + 605, + 295, + 713 + ], + "type": "text", + "content": " and trained on a subset of the LAION dataset [55] at the same resolution. In the second stage, we fine-tune the model on a curated set of high-aesthetic-quality images at a higher resolution of " + }, + { + "bbox": [ + 54, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 54, + 605, + 295, + 713 + ], + "type": "text", + "content": ". All reported results for PixelFlow are based on this final " + }, + { + "bbox": [ + 54, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 54, + 605, + 295, + 713 + ], + "type": "text", + "content": " resolution model." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 315, + 70, + 555, + 269 + ], + "blocks": [ + { + "bbox": [ + 55, + 319, + 295, + 354 + ], + "lines": [ + { + "bbox": [ + 55, + 319, + 295, + 354 + ], + "spans": [ + { + "bbox": [ + 55, + 319, + 295, + 354 + ], + "type": "text", + "content": "Table 4. 
Comparisons on class-conditional image generation on ImageNet " + }, + { + "bbox": [ + 55, + 319, + 295, + 354 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 319, + 295, + 354 + ], + "type": "text", + "content": ". PixelFlow achieves competitive performance compared with latent space based models." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 70, + 555, + 269 + ], + "lines": [ + { + "bbox": [ + 315, + 70, + 555, + 269 + ], + "spans": [ + { + "bbox": [ + 315, + 70, + 555, + 269 + ], + "type": "table", + "html": "
MethodGenEval OverallT2I-CompBenchDPG Bench
ColorShapeTexture
SDv1.5 [50]0.430.37300.36460.421963.18
DALL-E 2 [49]0.520.57500.54640.6374-
SDv2.1 [50]0.500.56940.44950.4982-
SDXL [47]0.550.63690.54080.563774.65
PixArt-α [6]0.480.68860.55820.704471.11
DALL-E 3 [3]0.67†0.8110†0.6750†0.8070†83.50†
GenTron [7]-0.76740.57000.7150-
SD3 [17]0.74----
Transfusion [70]0.63----
LlamaGen [59]0.32----
Emu 3 [64]0.66†0.7913†0.5846†0.7422†80.60
PixelFlow (ours)0.600.75780.45290.600677.93
0.64†0.7689†0.5059†0.6273†
", + "image_path": "960f1767c92ece40bd94624c230c619ad8e2686c31b28e678b596ac051a2b79b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 277, + 555, + 322 + ], + "lines": [ + { + "bbox": [ + 313, + 277, + 555, + 322 + ], + "spans": [ + { + "bbox": [ + 313, + 277, + 555, + 322 + ], + "type": "text", + "content": "Table 5. Comparison with state-of-the-art models on text-to-image generation benchmarks. We evaluate on GenEval [19], T2I-CompBench [27] and DPG-Bench [26]. We use " + }, + { + "bbox": [ + 313, + 277, + 555, + 322 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 313, + 277, + 555, + 322 + ], + "type": "text", + "content": " to indicate the result with prompt rewriting." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 343, + 556, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 343, + 556, + 524 + ], + "spans": [ + { + "bbox": [ + 313, + 343, + 556, + 524 + ], + "type": "text", + "content": "To comprehensively evaluate the performance of PixelFlow-T2I in text-to-image generation, we employ three widely recognized benchmarks, each targeting a different facet of compositional understanding: T2I-CompBench [27] assesses alignment between generated images and complex semantic relationships in text. We evaluate three tasks—color, shape, and texture binding—by generating five images per prompt across 300 prompts per sub-task. Alignment is measured using BLIP-VQA[36]; GenEval [19] evaluates compositional aspects such as coherence and spatial arrangement. We generate over 2,000 images from 553 prompts and report the average performance across tasks; DPG-Bench [26] focuses on complex textual descriptions, with 4,000 images generated from 1,065 prompts and results averaged across tasks." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 540, + 556, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 556, + 673 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 556, + 673 + ], + "type": "text", + "content": "Quantitative results. As shown in Table 5, PixelFlow achieves competitive performance across all benchmarks, demonstrating strong compositional understanding in freeform text-to-image generation. It performs particularly well on T2I-CompBench, with high scores in color and texture binding, and solid results on GenEval (0.64) and DPG-Bench (77.93), surpassing many established models. These results underscore PixelFlow as a promising direction for pixel-space image generation conditioned on natural language—showcasing its potential for open-ended, text-driven image synthesis." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "content": "Visualization. 
We visualize the intermediate results during the sampling process in Figure 3, specifically show" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 72, + 251, + 261 + ], + "blocks": [ + { + "bbox": [ + 62, + 72, + 251, + 261 + ], + "lines": [ + { + "bbox": [ + 62, + 72, + 251, + 261 + ], + "spans": [ + { + "bbox": [ + 62, + 72, + 251, + 261 + ], + "type": "image", + "image_path": "dfa76b1e78167a2685a4cccf48e1fa4f40bc3d0764b6f9c45a96acf621b4f749.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 263, + 249, + 319 + ], + "lines": [ + { + "bbox": [ + 61, + 263, + 249, + 319 + ], + "spans": [ + { + "bbox": [ + 61, + 263, + 249, + 319 + ], + "type": "text", + "content": "A native Warrior shaman Bengal Cat with a black and white leopard pattern, blue eyes, short fur, and portrait pose, colorful feathers and colorful ornaments, a regal oil-style portrait of the queen of native Kitty shaman white Cat with wings and headdress. Nordic is kind and motherly, it has black eye makeup and her hair is in messy." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 60, + 322, + 154, + 415 + ], + "blocks": [ + { + "bbox": [ + 60, + 322, + 154, + 415 + ], + "lines": [ + { + "bbox": [ + 60, + 322, + 154, + 415 + ], + "spans": [ + { + "bbox": [ + 60, + 322, + 154, + 415 + ], + "type": "image", + "image_path": "829436484ff51a169826baf14a76a83de72c82cd2d33996b8e380de88ae808a6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 60, + 415, + 151, + 439 + ], + "lines": [ + { + "bbox": [ + 60, + 415, + 151, + 439 + ], + "spans": [ + { + "bbox": [ + 60, + 415, + 151, + 439 + ], + "type": "text", + "content": "1940s vintage colored photo of a well-groomed man, crew cut hair, front view, kodak portrait film" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 156, + 322, + 235, + 414 + ], + "blocks": [ + { + "bbox": [ + 156, + 322, + 235, + 414 + ], + "lines": [ + { + "bbox": [ + 156, + 322, + 235, + 414 + ], + "spans": [ + { + "bbox": [ + 156, + 322, + 235, + 414 + ], + "type": "image", + "image_path": "689652bc6b53f934f9785c5165066f8e39102900d667548baef4e2410fb2b10a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 155, + 415, + 249, + 455 + ], + "lines": [ + { + "bbox": [ + 155, + 415, + 249, + 455 + ], + "spans": [ + { + "bbox": [ + 155, + 415, + 249, + 455 + ], + "type": "text", + "content": "A cute 3 year old Chinese girl with a big head and a small body, hair is fluffy and messy tied in a pill head, big eyes, one eye blinking, doe mouth, playful and cute." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 257, + 71, + 351, + 164 + ], + "blocks": [ + { + "bbox": [ + 257, + 71, + 351, + 164 + ], + "lines": [ + { + "bbox": [ + 257, + 71, + 351, + 164 + ], + "spans": [ + { + "bbox": [ + 257, + 71, + 351, + 164 + ], + "type": "image", + "image_path": "f0a1ba1c09e004b15deafc39cd17fed3708d0f3127aa7ce8ffe40ba17efa03a5.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 258, + 164, + 351, + 195 + ], + "lines": [ + { + "bbox": [ + 258, + 164, + 351, + 195 + ], + "spans": [ + { + "bbox": [ + 258, + 164, + 351, + 195 + ], + "type": "text", + "content": "An extremely happy American Cocker Spaniel is smiling and looking up at the camera with his head tilted to one side." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 356, + 70, + 449, + 163 + ], + "blocks": [ + { + "bbox": [ + 356, + 70, + 449, + 163 + ], + "lines": [ + { + "bbox": [ + 356, + 70, + 449, + 163 + ], + "spans": [ + { + "bbox": [ + 356, + 70, + 449, + 163 + ], + "type": "image", + "image_path": "9ec021b2de0c41610614f63b527621de689b8c074053a770f502bbda417ecf35.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 356, + 163, + 449, + 187 + ], + "lines": [ + { + "bbox": [ + 356, + 163, + 449, + 187 + ], + "spans": [ + { + "bbox": [ + 356, + 163, + 449, + 187 + ], + "type": "text", + "content": "Full body portrait of deer by side, visible realistic, with style as a painting in the style by Caravaggio" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 258, + 202, + 351, + 296 + ], + "blocks": [ + { + "bbox": [ + 258, + 202, + 351, + 296 + ], + "lines": [ + { + "bbox": [ + 258, + 202, + 351, + 296 + ], + "spans": [ + { + "bbox": [ + 
258, + 202, + 351, + 296 + ], + "type": "image", + "image_path": "92fed5a1cf8a7216a94657393f910ae3da6beabb617da9b372b21359660d57ab.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 257, + 297, + 347, + 319 + ], + "lines": [ + { + "bbox": [ + 257, + 297, + 347, + 319 + ], + "spans": [ + { + "bbox": [ + 257, + 297, + 347, + 319 + ], + "type": "text", + "content": "Greeting card, party, hyped animal, open mouth, surprised excitement" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 356, + 202, + 449, + 296 + ], + "blocks": [ + { + "bbox": [ + 356, + 202, + 449, + 296 + ], + "lines": [ + { + "bbox": [ + 356, + 202, + 449, + 296 + ], + "spans": [ + { + "bbox": [ + 356, + 202, + 449, + 296 + ], + "type": "image", + "image_path": "e27a6f4ebd515a8c9e06c383f05bb0450c9b8cc87658670a50d089cdf9c075e3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 358, + 297, + 449, + 320 + ], + "lines": [ + { + "bbox": [ + 358, + 297, + 449, + 320 + ], + "spans": [ + { + "bbox": [ + 358, + 297, + 449, + 320 + ], + "type": "text", + "content": "Super cute clay world, isometric view of Eiffel Tower in Paris, cute clay stop motion animation, people" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 257, + 322, + 351, + 415 + ], + "blocks": [ + { + "bbox": [ + 257, + 322, + 351, + 415 + ], + "lines": [ + { + "bbox": [ + 257, + 322, + 351, + 415 + ], + "spans": [ + { + "bbox": [ + 257, + 322, + 351, + 415 + ], + "type": "image", + "image_path": "2598aaac23e271a9919f3d68b510ad6796050886ff5e22bb5b556ae898cc8c18.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 258, + 415, + 348, + 448 + ], + "lines": [ + { + "bbox": [ + 258, + 415, + 348, + 448 + ], + "spans": [ + { + "bbox": [ + 258, + 
415, + 348, + 448 + ], + "type": "text", + "content": "Close-up of an aged man with weathered features and sharp blue eyes peering wisely from beneath a tweed flat cap." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 358, + 323, + 447, + 416 + ], + "blocks": [ + { + "bbox": [ + 358, + 323, + 447, + 416 + ], + "lines": [ + { + "bbox": [ + 358, + 323, + 447, + 416 + ], + "spans": [ + { + "bbox": [ + 358, + 323, + 447, + 416 + ], + "type": "image", + "image_path": "ae7891b6b48cc6233d1d37686117c09cf7b4ad7ebb7634ee6b21bd31c81de56d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 356, + 418, + 447, + 441 + ], + "lines": [ + { + "bbox": [ + 356, + 418, + 447, + 441 + ], + "spans": [ + { + "bbox": [ + 356, + 418, + 447, + 441 + ], + "type": "text", + "content": "A white bearded man's face emerges from a cloud of white butterflies, background is white" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 468, + 555, + 490 + ], + "lines": [ + { + "bbox": [ + 55, + 468, + 555, + 490 + ], + "spans": [ + { + "bbox": [ + 55, + 468, + 555, + 490 + ], + "type": "text", + "content": "Figure 5. Qualitative results of text-conditional generation of PixelFlow. All images are " + }, + { + "bbox": [ + 55, + 468, + 555, + 490 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 468, + 555, + 490 + ], + "type": "text", + "content": " resolution. Key components of the prompt are highlighted in RED." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 455, + 72, + 548, + 164 + ], + "blocks": [ + { + "bbox": [ + 455, + 72, + 548, + 164 + ], + "lines": [ + { + "bbox": [ + 455, + 72, + 548, + 164 + ], + "spans": [ + { + "bbox": [ + 455, + 72, + 548, + 164 + ], + "type": "image", + "image_path": "f1a6f6462f120aca1848d9adea07e7e02916b71d19d0e7609f25d0ed0debcfc0.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 455, + 203, + 548, + 295 + ], + "blocks": [ + { + "bbox": [ + 457, + 165, + 550, + 203 + ], + "lines": [ + { + "bbox": [ + 457, + 165, + 550, + 203 + ], + "spans": [ + { + "bbox": [ + 457, + 165, + 550, + 203 + ], + "type": "text", + "content": "A digital art piece featuring a splitface portrait of a woman. The left side of face is in a calm, while the right side shows a more intense and red color" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 455, + 203, + 548, + 295 + ], + "lines": [ + { + "bbox": [ + 455, + 203, + 548, + 295 + ], + "spans": [ + { + "bbox": [ + 455, + 203, + 548, + 295 + ], + "type": "image", + "image_path": "4bf4b044f87e646bd766a7e0041ee5816da7c9dfabdbc88c300b35221f52b162.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 454, + 319, + 548, + 413 + ], + "blocks": [ + { + "bbox": [ + 456, + 296, + 546, + 319 + ], + "lines": [ + { + "bbox": [ + 456, + 296, + 546, + 319 + ], + "spans": [ + { + "bbox": [ + 456, + 296, + 546, + 319 + ], + "type": "text", + "content": "A baby cat stands on two legs. facing forward, wearing an Indian classical gloves and shoes." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 454, + 319, + 548, + 413 + ], + "lines": [ + { + "bbox": [ + 454, + 319, + 548, + 413 + ], + "spans": [ + { + "bbox": [ + 454, + 319, + 548, + 413 + ], + "type": "image", + "image_path": "7e3f68cc81e7cd28b1a3cd02cda899bdfad7c951332adfb75b8904502f1c90ee.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 456, + 418, + 548, + 449 + ], + "lines": [ + { + "bbox": [ + 456, + 418, + 548, + 449 + ], + "spans": [ + { + "bbox": [ + 456, + 418, + 548, + 449 + ], + "type": "text", + "content": "Johannes Vermeer, panda wearing pearl earrings, blue headbands, artwork Girl with a Pearl Earring oil painting," + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 54, + 510, + 295, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 510, + 295, + 643 + ], + "spans": [ + { + "bbox": [ + 54, + 510, + 295, + 643 + ], + "type": "text", + "content": "ing the final step of each resolution stage. As resolution increases, a clear denoising trend emerges—images become progressively cleaner and less noisy at each stage. Additional generated samples along with their input text prompts are shown in Figure 5 (512×512) and Figure 6 (1024×1024). PixelFlow demonstrates high visual fidelity and strong text-image alignment, effectively capturing key visual elements and their relationships from complex prompts. Notably, it generates fine-grained details—such as animal fur, human hair, and hat textures—highlighting its strong attention to detail in pixel space." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 55, + 656, + 128, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 656, + 128, + 669 + ], + "spans": [ + { + "bbox": [ + 55, + 656, + 128, + 669 + ], + "type": "text", + "content": "5. 
Conclusion" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 55, + 677, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 295, + 715 + ], + "type": "text", + "content": "We introduce PixelFlow, a novel image generation model that re-think the predominance of latent space based models by directly operating on raw pixel space. By directly" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 510, + 555, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 555, + 608 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 555, + 608 + ], + "type": "text", + "content": "transforming between different resolution stages, our model exhibits a compelling advantage in simplicity and end-to-end trainability. On both class-conditional image generation and text-to-image generation benchmarks, PixelFlow has been proven to demonstrate competitive image generation capabilities compared to popular latent space-based methods. We hope that this new perspective will inspire future research in visual generation models." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 616, + 556, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 616, + 556, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 616, + 556, + 712 + ], + "type": "text", + "content": "Limitations Despite its advantages, PixelFlow still faces certain limitations. Although the model avoids full-resolution computation across all stages, the final stage requires full-resolution attention, which accounts for roughly " + }, + { + "bbox": [ + 313, + 616, + 556, + 712 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 313, + 616, + 556, + 712 + ], + "type": "text", + "content": " of the total inference time. Moreover, we observe that training convergence slows as the sequence length increases. 
Addressing these challenges presents opportunities for future improvements in efficiency and scalability." + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 70, + 380, + 339 + ], + "blocks": [ + { + "bbox": [ + 110, + 70, + 380, + 339 + ], + "lines": [ + { + "bbox": [ + 110, + 70, + 380, + 339 + ], + "spans": [ + { + "bbox": [ + 110, + 70, + 380, + 339 + ], + "type": "image", + "image_path": "382f8d45ddca614bb480fe9f7a8e10e2908775db30a386026a4ade4a60172f9f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 341, + 375, + 370 + ], + "lines": [ + { + "bbox": [ + 111, + 341, + 375, + 370 + ], + "spans": [ + { + "bbox": [ + 111, + 341, + 375, + 370 + ], + "type": "text", + "content": "Raspberry in the form of women walk along the path of a fairy tale forest. She carries a jug of water with her. Her head is made of one big raspberry on which she has big and beautiful eyes, as well as nose and mouth." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 399, + 70, + 490, + 188 + ], + "blocks": [ + { + "bbox": [ + 399, + 70, + 490, + 188 + ], + "lines": [ + { + "bbox": [ + 399, + 70, + 490, + 188 + ], + "spans": [ + { + "bbox": [ + 399, + 70, + 490, + 188 + ], + "type": "image", + "image_path": "33486aad29bbb90aa86465c16413ccddf01d8d1a9adeff17f3b6ec707ed7a7a1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 386, + 188, + 499, + 221 + ], + "lines": [ + { + "bbox": [ + 386, + 188, + 499, + 221 + ], + "spans": [ + { + "bbox": [ + 386, + 188, + 499, + 221 + ], + "type": "text", + "content": "An embroidered sweater with an anatomical illustration of the human torso and chest, the skin is open to reveal the internal anatomy." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 384, + 223, + 502, + 339 + ], + "blocks": [ + { + "bbox": [ + 384, + 223, + 502, + 339 + ], + "lines": [ + { + "bbox": [ + 384, + 223, + 502, + 339 + ], + "spans": [ + { + "bbox": [ + 384, + 223, + 502, + 339 + ], + "type": "image", + "image_path": "3d24f25e0022f0473ae02438c61b68f810c8ea75a900967a8e7b99f7b4169784.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 109, + 377, + 203, + 472 + ], + "blocks": [ + { + "bbox": [ + 109, + 377, + 203, + 472 + ], + "lines": [ + { + "bbox": [ + 109, + 377, + 203, + 472 + ], + "spans": [ + { + "bbox": [ + 109, + 377, + 203, + 472 + ], + "type": "image", + "image_path": "aac1594d1fe3227964b93b143aa9bd09406790d42aee55a24bcc6a2b4154c6ce.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 474, + 200, + 500 + ], + "lines": [ + { + "bbox": [ + 110, + 474, + 200, + 500 + ], + "spans": [ + { + "bbox": [ + 110, + 474, + 
200, + 500 + ], + "type": "text", + "content": "Photorealistic, 4k, a micro baby African Buffalo perched on a coffee cup" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 55, + 672, + 555, + 693 + ], + "lines": [ + { + "bbox": [ + 55, + 672, + 555, + 693 + ], + "spans": [ + { + "bbox": [ + 55, + 672, + 555, + 693 + ], + "type": "text", + "content": "Figure 6. Qualitative samples of PixelFlow. We present the generated images of " + }, + { + "bbox": [ + 55, + 672, + 555, + 693 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 55, + 672, + 555, + 693 + ], + "type": "text", + "content": " resolution. Key words are highlighted in RED." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 501, + 204, + 594 + ], + "blocks": [ + { + "bbox": [ + 109, + 501, + 204, + 594 + ], + "lines": [ + { + "bbox": [ + 109, + 501, + 204, + 594 + ], + "spans": [ + { + "bbox": [ + 109, + 501, + 204, + 594 + ], + "type": "image", + "image_path": "3692d5cc5a780a0a5c914a8de19ce40c80a3d05257bac2c213777c182b8aeeae.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 597, + 201, + 649 + ], + "lines": [ + { + "bbox": [ + 111, + 597, + 201, + 649 + ], + "spans": [ + { + "bbox": [ + 111, + 597, + 201, + 649 + ], + "type": "text", + "content": "Great Dane Dog sitting on a toilet bowl in wide bathroom, reading a large double page spread newspaper, sit like human. The background is in a white room." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 207, + 379, + 302, + 472 + ], + "blocks": [ + { + "bbox": [ + 207, + 379, + 302, + 472 + ], + "lines": [ + { + "bbox": [ + 207, + 379, + 302, + 472 + ], + "spans": [ + { + "bbox": [ + 207, + 379, + 302, + 472 + ], + "type": "image", + "image_path": "d180a5889fe2515f2799a5337322a963bbac86597d9a728f402fb0406306f195.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 474, + 299, + 499 + ], + "lines": [ + { + "bbox": [ + 209, + 474, + 299, + 499 + ], + "spans": [ + { + "bbox": [ + 209, + 474, + 299, + 499 + ], + "type": "text", + "content": "A picture of Joe rogan's head on a cat's body, sitting behind a podcasting microphone." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 207, + 501, + 302, + 594 + ], + "blocks": [ + { + "bbox": [ + 207, + 501, + 302, + 594 + ], + "lines": [ + { + "bbox": [ + 207, + 501, + 302, + 594 + ], + "spans": [ + { + "bbox": [ + 207, + 501, + 302, + 594 + ], + "type": "image", + "image_path": "8e2312a41faf07f10fae91853ba5b4c3c92b1b41f658daedd6637a40fbad39fd.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 597, + 299, + 649 + ], + "lines": [ + { + "bbox": [ + 209, + 597, + 299, + 649 + ], + "spans": [ + { + "bbox": [ + 209, + 597, + 299, + 649 + ], + "type": "text", + "content": "Full body shot of balenciaga fashion model and parrot hybrid with a human body and the head of the parrot. He is walking through a podium like a model." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 308, + 377, + 402, + 472 + ], + "blocks": [ + { + "bbox": [ + 386, + 341, + 501, + 375 + ], + "lines": [ + { + "bbox": [ + 386, + 341, + 501, + 375 + ], + "spans": [ + { + "bbox": [ + 386, + 341, + 501, + 375 + ], + "type": "text", + "content": "Prototype flying fox made from blown glass, Lino Tagliapietra style Muranean glassmaking, intricate details." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 308, + 377, + 402, + 472 + ], + "lines": [ + { + "bbox": [ + 308, + 377, + 402, + 472 + ], + "spans": [ + { + "bbox": [ + 308, + 377, + 402, + 472 + ], + "type": "image", + "image_path": "17b84eb273ffb350d431ec171e95947703648198191d356a26eeca493a7eadd9.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 474, + 397, + 499 + ], + "lines": [ + { + "bbox": [ + 307, + 474, + 397, + 499 + ], + "spans": [ + { + "bbox": [ + 307, + 474, + 397, + 499 + ], + "type": "text", + "content": "3D illustration of the chip with text \"AI\" floating above it, with a blue color scheme." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 308, + 501, + 402, + 594 + ], + "blocks": [ + { + "bbox": [ + 308, + 501, + 402, + 594 + ], + "lines": [ + { + "bbox": [ + 308, + 501, + 402, + 594 + ], + "spans": [ + { + "bbox": [ + 308, + 501, + 402, + 594 + ], + "type": "image", + "image_path": "af011922a940b4656f8bfebb22da807911ec5a80f76519c367ace4b119726fc9.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 597, + 400, + 658 + ], + "lines": [ + { + "bbox": [ + 310, + 597, + 400, + 658 + ], + "spans": [ + { + "bbox": [ + 310, + 597, + 400, + 658 + ], + "type": "text", + "content": "Sketch sheet of anatomical studies by Leonardo da Vinci Iron man and weapons, show detailed studies of technology and body, use little soft details in red and gold for the armor, mathematic." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 407, + 377, + 502, + 472 + ], + "blocks": [ + { + "bbox": [ + 407, + 377, + 502, + 472 + ], + "lines": [ + { + "bbox": [ + 407, + 377, + 502, + 472 + ], + "spans": [ + { + "bbox": [ + 407, + 377, + 502, + 472 + ], + "type": "image", + "image_path": "c32148ab97591973dd086d1447f89537f63a1843935aa5912f270e9ed7ced30f.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 407, + 474, + 500, + 499 + ], + "lines": [ + { + "bbox": [ + 407, + 474, + 500, + 499 + ], + "spans": [ + { + "bbox": [ + 407, + 474, + 500, + 499 + ], + "type": "text", + "content": "The world's smallest laughing baby Piggy, perched on someone's finger." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 407, + 501, + 501, + 595 + ], + "blocks": [ + { + "bbox": [ + 407, + 501, + 501, + 595 + ], + "lines": [ + { + "bbox": [ + 407, + 501, + 501, + 595 + ], + "spans": [ + { + "bbox": [ + 407, + 501, + 501, + 595 + ], + "type": "image", + "image_path": "d595fc36281d0c0c4ea8cd4eb1657f6509f2d51fe7437ccbfade8cdf93ea2175.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 407, + 597, + 497, + 649 + ], + "lines": [ + { + "bbox": [ + 407, + 597, + 497, + 649 + ], + "spans": [ + { + "bbox": [ + 407, + 597, + 497, + 649 + ], + "type": "text", + "content": "Telephoto lens shooting, panoramic view, a white sheep struggling desperately under the sea, with bubbles constantly popping out of its mouth, realistic and lifelike." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 91, + 296, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 91, + 296, + 134 + ], + "spans": [ + { + "bbox": [ + 61, + 91, + 296, + 134 + ], + "type": "text", + 
"content": "[1] Michael Samuel Albergo and Eric Vanden-Eijnden. Building normalizing flows with stochastic interpolants. In The Eleventh International Conference on Learning Representations, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 135, + 296, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 135, + 296, + 191 + ], + "spans": [ + { + "bbox": [ + 61, + 135, + 296, + 191 + ], + "type": "text", + "content": "[2] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Qinsheng Zhang, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, et al. ediff-i: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 191, + 295, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 191, + 295, + 246 + ], + "spans": [ + { + "bbox": [ + 62, + 191, + 295, + 246 + ], + "type": "text", + "content": "[3] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.opennai.com/papers/dall-e-3.pdf, 2(3):8, 2023. 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 247, + 295, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 247, + 295, + 302 + ], + "spans": [ + { + "bbox": [ + 62, + 247, + 295, + 302 + ], + "type": "text", + "content": "[4] Andrew Campbell, William Harvey, Christian Dietrich Weilbach, Valentin De Bortoli, Tom Rainforth, and Arnaud Doucet. Trans-dimensional generative modeling via jump diffusion models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 
4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 302, + 295, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 302, + 295, + 346 + ], + "spans": [ + { + "bbox": [ + 62, + 302, + 295, + 346 + ], + "type": "text", + "content": "[5] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11315-11325, 2022. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 347, + 295, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 347, + 295, + 401 + ], + "spans": [ + { + "bbox": [ + 62, + 347, + 295, + 401 + ], + "type": "text", + "content": "[6] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 4, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 403, + 296, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 403, + 296, + 468 + ], + "spans": [ + { + "bbox": [ + 62, + 403, + 296, + 468 + ], + "type": "text", + "content": "[7] Shoufa Chen, Mengmeng Xu, Jiawei Ren, Yuren Cong, Sen He, Yanping Xie, Animesh Sinha, Ping Luo, Tao Xiang, and Juan-Manuel Perez-Rua. Gentron: Diffusion transformers for image and video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6441-6451, 2024. 
1, 4, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 469, + 295, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 469, + 295, + 523 + ], + "spans": [ + { + "bbox": [ + 62, + 469, + 295, + 523 + ], + "type": "text", + "content": "[8] Shoufa Chen, Chongjian Ge, Yuqi Zhang, Yida Zhang, Fengda Zhu, Hao Yang, Hongxiang Hao, Hui Wu, Zhichao Lai, Yifei Hu, et al. Goku: Flow based video generative foundation models. arXiv preprint arXiv:2502.04896, 2025.1, 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 525, + 295, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 525, + 295, + 546 + ], + "spans": [ + { + "bbox": [ + 62, + 525, + 295, + 546 + ], + "type": "text", + "content": "[9] Ting Chen. On the importance of noise scheduling for diffusion models. arXiv preprint arXiv:2301.10972, 2023. 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 548, + 295, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 548, + 295, + 601 + ], + "spans": [ + { + "bbox": [ + 57, + 548, + 295, + 601 + ], + "type": "text", + "content": "[10] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instructionfinetuned language models. Journal of Machine Learning Research, 25(70):1-53, 2024. 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 602, + 295, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 602, + 295, + 668 + ], + "spans": [ + { + "bbox": [ + 57, + 602, + 295, + 668 + ], + "type": "text", + "content": "[11] Mostafa Dehghani, Basil Mustafa, Josip Djolonga, Jonathan Heek, Matthias Minderer, Mathilde Caron, Andreas Steiner, Joan Puigcerver, Robert Geirhos, Ibrahim M Alabdul-mohsin, et al. Patch n'pack: Navit, a vision transformer for any aspect ratio and resolution. 
Advances in Neural Information Processing Systems, 36, 2024. 3, 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 670, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 670, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 670, + 295, + 713 + ], + "type": "text", + "content": "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 4" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "type": "text", + "content": "[13] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 107, + 553, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 107, + 553, + 139 + ], + "spans": [ + { + "bbox": [ + 317, + 107, + 553, + 139 + ], + "type": "text", + "content": "[14] John R Dormand and Peter J Prince. A family of embedded runge-kutta formulae. Journal of computational and applied mathematics, 6(1):19-26, 1980. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 140, + 553, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 140, + 553, + 171 + ], + "spans": [ + { + "bbox": [ + 317, + 140, + 553, + 171 + ], + "type": "text", + "content": "[15] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 
3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 172, + 553, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 172, + 553, + 217 + ], + "spans": [ + { + "bbox": [ + 317, + 172, + 553, + 217 + ], + "type": "text", + "content": "[16] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 217, + 553, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 217, + 553, + 282 + ], + "spans": [ + { + "bbox": [ + 316, + 217, + 553, + 282 + ], + "type": "text", + "content": "[17] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 1, 2, 3, 4, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 283, + 553, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 283, + 553, + 326 + ], + "spans": [ + { + "bbox": [ + 316, + 283, + 553, + 326 + ], + "type": "text", + "content": "[18] Zach Evans, CJ Carr, Josiah Taylor, Scott H Hawley, and Jordi Pons. Fast timing-conditioned latent audio diffusion. In *Forty-first International Conference on Machine Learning*, 2024. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 327, + 553, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 327, + 553, + 371 + ], + "spans": [ + { + "bbox": [ + 316, + 327, + 553, + 371 + ], + "type": "text", + "content": "[19] Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. Geneval: An object-focused framework for evaluating text-to-image alignment. 
Advances in Neural Information Processing Systems, 36, 2024. 2, 4, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 372, + 553, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 372, + 553, + 415 + ], + "spans": [ + { + "bbox": [ + 316, + 372, + 553, + 415 + ], + "type": "text", + "content": "[20] Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Joshua M Susskind, and Navdeep Jaitly. Matryoshka diffusion models. In The Twelfth International Conference on Learning Representations, 2023. 1, 2, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 415, + 553, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 415, + 553, + 437 + ], + "spans": [ + { + "bbox": [ + 316, + 415, + 553, + 437 + ], + "type": "text", + "content": "[21] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 438, + 555, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 438, + 555, + 481 + ], + "spans": [ + { + "bbox": [ + 316, + 438, + 555, + 481 + ], + "type": "text", + "content": "[22] Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. Journal of Machine Learning Research, 23(47):1-33, 2022. 1, 2, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 482, + 553, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 482, + 553, + 525 + ], + "spans": [ + { + "bbox": [ + 317, + 482, + 553, + 525 + ], + "type": "text", + "content": "[23] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 
1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 525, + 553, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 525, + 553, + 570 + ], + "spans": [ + { + "bbox": [ + 317, + 525, + 553, + 570 + ], + "type": "text", + "content": "[24] Emiel Hoogeboom, Jonathan Heek, and Tim Salimans. simple diffusion: End-to-end diffusion for high resolution images. In International Conference on Machine Learning, pages 13213-13232. PMLR, 2023. 2, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 571, + 553, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 571, + 553, + 613 + ], + "spans": [ + { + "bbox": [ + 317, + 571, + 553, + 613 + ], + "type": "text", + "content": "[25] Emiel Hoogeboom, Thomas Mensink, Jonathan Heek, Kay Lamerigts, Ruiqi Gao, and Tim Salimans. Simpler diffusion (sid2): 1.5 fid on imagenet512 with pixel-space diffusion. arXiv preprint arXiv:2410.19324, 2024. 2, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 614, + 553, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 614, + 553, + 658 + ], + "spans": [ + { + "bbox": [ + 317, + 614, + 553, + 658 + ], + "type": "text", + "content": "[26] Xiwei Hu, Rui Wang, Yixiao Fang, Bin Fu, Pei Cheng, and Gang Yu. Ella: Equip diffusion models with llm for enhanced semantic alignment. arXiv preprint arXiv:2403.05135, 2024. 2, 4, 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 658, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 658, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 658, + 553, + 713 + ], + "type": "text", + "content": "[27] Kaiyi Huang, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2i-compbench: A comprehensive benchmark for open-world compositional text-to-image generation. Advances in Neural Information Processing Systems, 36:78723-78747, 2023. 
4, 7" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 297, + 106 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 297, + 106 + ], + "type": "text", + "content": "[28] Allan Jabri, David Fleet, and Ting Chen. Scalable adaptive computation for iterative generation. arXiv preprint arXiv:2212.11972, 2022. 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 107, + 295, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 107, + 295, + 162 + ], + "spans": [ + { + "bbox": [ + 56, + 107, + 295, + 162 + ], + "type": "text", + "content": "[29] Yang Jin, Zhicheng Sun, Ningyuan Li, Kun Xu, Hao Jiang, Nan Zhuang, Quzhe Huang, Yang Song, Yadong Mu, and Zhouchen Lin. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024.3,4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 163, + 295, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 163, + 295, + 217 + ], + "spans": [ + { + "bbox": [ + 56, + 163, + 295, + 217 + ], + "type": "text", + "content": "[30] Dongjun Kim, Chieh-Hsin Lai, Wei-Hsiang Liao, Yuhta Takida, Naoki Murata, Toshimitsu Uesaka, Yuki Mitsufuji, and Stefano Ermon. Pagoda: Progressive growing of a one-step generator from a low-resolution diffusion teacher. arXiv preprint arXiv:2405.14822, 2024. 
2, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 219, + 295, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 219, + 295, + 261 + ], + "spans": [ + { + "bbox": [ + 56, + 219, + 295, + 261 + ], + "type": "text", + "content": "[31] Diederik Kingma and Ruiqi Gao. Understanding diffusion objectives as the elbo with simple data augmentation. Advances in Neural Information Processing Systems, 36, 2024. 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 263, + 295, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 263, + 295, + 297 + ], + "spans": [ + { + "bbox": [ + 56, + 263, + 295, + 297 + ], + "type": "text", + "content": "[32] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2015. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 298, + 295, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 298, + 295, + 342 + ], + "spans": [ + { + "bbox": [ + 56, + 298, + 295, + 342 + ], + "type": "text", + "content": "[33] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 343, + 295, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 343, + 295, + 397 + ], + "spans": [ + { + "bbox": [ + 56, + 343, + 295, + 397 + ], + "type": "text", + "content": "[34] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 
6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 399, + 295, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 399, + 295, + 420 + ], + "spans": [ + { + "bbox": [ + 56, + 399, + 295, + 420 + ], + "type": "text", + "content": "[35] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 422, + 295, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 422, + 295, + 475 + ], + "spans": [ + { + "bbox": [ + 56, + 422, + 295, + 475 + ], + "type": "text", + "content": "[36] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR, 2022. 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 477, + 295, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 477, + 295, + 510 + ], + "spans": [ + { + "bbox": [ + 56, + 477, + 295, + 510 + ], + "type": "text", + "content": "[37] Tianhong Li, Qinyi Sun, Lijie Fan, and Kaiming He. Fractal generative models. arXiv preprint arXiv:2502.17437, 2025.2, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 511, + 295, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 511, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 56, + 511, + 295, + 555 + ], + "type": "text", + "content": "[38] Yaron Lipman, Ricky T. Q. Chen, Heli Ben-Hamu, Maximilian Nickel, and Matthew Le. Flow matching for generative modeling. In The Eleventh International Conference on Learning Representations, 2023. 
2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 557, + 295, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 557, + 295, + 600 + ], + "spans": [ + { + "bbox": [ + 56, + 557, + 295, + 600 + ], + "type": "text", + "content": "[39] Haohe Liu, Zehua Chen, Yi Yuan, Xinhao Mei, Xubo Liu, Danilo Mandic, Wenwu Wang, and Mark D Plumbley. Audioldm: Text-to-audio generation with latent diffusion models. arXiv preprint arXiv:2301.12503, 2023. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 601, + 295, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 601, + 295, + 645 + ], + "spans": [ + { + "bbox": [ + 56, + 601, + 295, + 645 + ], + "type": "text", + "content": "[40] Xingchao Liu, Chengyue Gong, and qiang liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. In The Eleventh International Conference on Learning Representations, 2023. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 647, + 295, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 295, + 679 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 295, + 679 + ], + "type": "text", + "content": "[41] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 681, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 681, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 681, + 295, + 713 + ], + "type": "text", + "content": "[42] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. 
Sit: Exploring flow and diffusion-based generative models with scalable" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "text", + "content": "interpolant transformers. arXiv preprint arXiv:2401.08740, 2024. 2, 3, 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 95, + 553, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 95, + 553, + 128 + ], + "spans": [ + { + "bbox": [ + 316, + 95, + 553, + 128 + ], + "type": "text", + "content": "[43] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter W Battaglia. Generating images with sparse representations. arXiv preprint arXiv:2103.03841, 2021. 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 129, + 553, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 553, + 161 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 553, + 161 + ], + "type": "text", + "content": "[44] NVIDIA. Edify image: High-quality image generation with pixel space laplacian diffusion model. arXiv preprint arXiv:2411.07126, 2024. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 162, + 553, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 162, + 553, + 205 + ], + "spans": [ + { + "bbox": [ + 316, + 162, + 553, + 205 + ], + "type": "text", + "content": "[45] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 
1, 2, 3, 4, 5, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 206, + 553, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 206, + 553, + 250 + ], + "spans": [ + { + "bbox": [ + 316, + 206, + 553, + 250 + ], + "type": "text", + "content": "[46] Pablo Pernias, Dominic Rampas, Mats L Richter, Christopher J Pal, and Marc Aubreville. Würstchen: An efficient architecture for large-scale text-to-image diffusion models. arXiv preprint arXiv:2306.00637, 2023. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 251, + 553, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 251, + 553, + 304 + ], + "spans": [ + { + "bbox": [ + 316, + 251, + 553, + 304 + ], + "type": "text", + "content": "[47] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 1, 2, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 305, + 553, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 305, + 553, + 350 + ], + "spans": [ + { + "bbox": [ + 316, + 305, + 553, + 350 + ], + "type": "text", + "content": "[48] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International conference on machine learning, pages 8821-8831. Pmlr, 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 350, + 553, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 350, + 553, + 392 + ], + "spans": [ + { + "bbox": [ + 316, + 350, + 553, + 392 + ], + "type": "text", + "content": "[49] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. 
arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 394, + 555, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 394, + 555, + 449 + ], + "spans": [ + { + "bbox": [ + 316, + 394, + 555, + 449 + ], + "type": "text", + "content": "[50] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 2, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 449, + 553, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 449, + 553, + 514 + ], + "spans": [ + { + "bbox": [ + 316, + 449, + 553, + 514 + ], + "type": "text", + "content": "[51] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 515, + 553, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 515, + 553, + 568 + ], + "spans": [ + { + "bbox": [ + 316, + 515, + 553, + 568 + ], + "type": "text", + "content": "[52] Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image superresolution via iterative refinement. IEEE transactions on pattern analysis and machine intelligence, 45(4):4713-4726, 2022. 
1, 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 570, + 553, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 570, + 553, + 613 + ], + "spans": [ + { + "bbox": [ + 316, + 570, + 553, + 613 + ], + "type": "text", + "content": "[53] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 614, + 553, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 614, + 553, + 647 + ], + "spans": [ + { + "bbox": [ + 316, + 614, + 553, + 647 + ], + "type": "text", + "content": "[54] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 conference proceedings, pages 1-10, 2022. 7" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 647, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 647, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 647, + 553, + 713 + ], + "type": "text", + "content": "[55] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems, 35:25278-25294, 2022. 
7" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 126 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 126 + ], + "type": "text", + "content": "[56] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International conference on machine learning, pages 2256-2265. PMLR, 2015. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 129, + 294, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 129, + 294, + 183 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 294, + 183 + ], + "type": "text", + "content": "[57] Gabriela Ben Melech Stan, Diana Wofk, Scottie Fox, Alex Redden, Will Saxton, Jean Yu, Estelle Aflalo, Shao-Yen Tseng, Fabio Nonato, Matthias Muller, et al. Ldm3d: Latent diffusion model for 3d. arXiv preprint arXiv:2305.10853, 2023. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 185, + 294, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 185, + 294, + 228 + ], + "spans": [ + { + "bbox": [ + 56, + 185, + 294, + 228 + ], + "type": "text", + "content": "[58] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 
3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 231, + 294, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 231, + 294, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 231, + 294, + 274 + ], + "type": "text", + "content": "[59] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 2, 4, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 276, + 294, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 276, + 294, + 319 + ], + "spans": [ + { + "bbox": [ + 56, + 276, + 294, + 319 + ], + "type": "text", + "content": "[60] Jiayan Teng, Wendi Zheng, Ming Ding, Wenyi Hong, Jianqiao Wangni, Zhuoyi Yang, and Jie Tang. Relay diffusion: Unifying diffusion process across resolutions for image synthesis. arXiv preprint arXiv:2309.03350, 2023. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 321, + 294, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 321, + 294, + 363 + ], + "spans": [ + { + "bbox": [ + 56, + 321, + 294, + 363 + ], + "type": "text", + "content": "[61] Michael Tschannen, André Susano Pinto, and Alexander Kolesnikov. Jetformer: An autoregressive generative model of raw images and text. arXiv preprint arXiv:2411.19722, 2024. 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 365, + 294, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 294, + 409 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 294, + 409 + ], + "type": "text", + "content": "[62] Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Neural Information Processing Systems, 2017. 
3, 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 411, + 294, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 411, + 294, + 454 + ], + "spans": [ + { + "bbox": [ + 56, + 411, + 294, + 454 + ], + "type": "text", + "content": "[63] Xi Wang, Nicolas Dufour, Nefeli Andreou, Marie-Paule Cani, Victoria Fernández Abrevaya, David Picard, and Vicky Kalogeiton. Analysis of classifier-free guidance weight schedulers. arXiv preprint arXiv:2404.13040, 2024. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 456, + 294, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 456, + 294, + 499 + ], + "spans": [ + { + "bbox": [ + 56, + 456, + 294, + 499 + ], + "type": "text", + "content": "[64] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 502, + 294, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 502, + 294, + 555 + ], + "spans": [ + { + "bbox": [ + 56, + 502, + 294, + 555 + ], + "type": "text", + "content": "[65] Hanshu Yan, Xingchao Liu, Jiachun Pan, Jun Hao Liew, qiang liu, and Jiashi Feng. PeRFlow: Piecewise rectified flow as universal plug-and-play accelerator. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 557, + 294, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 557, + 294, + 610 + ], + "spans": [ + { + "bbox": [ + 56, + 557, + 294, + 610 + ], + "type": "text", + "content": "[66] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. 
Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 613, + 294, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 613, + 294, + 657 + ], + "spans": [ + { + "bbox": [ + 56, + 613, + 294, + 657 + ], + "type": "text", + "content": "[67] Xiaohui Zeng, Arash Vahdat, Francis Williams, Zan Gojcic, Or Litany, Sanja Fidler, and Karsten Kreis. LION: Latent point diffusion models for 3d shape generation. In Advances in Neural Information Processing Systems, 2022. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 658, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 658, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 658, + 294, + 713 + ], + "type": "text", + "content": "[68] Shuangfei Zhai, Ruixiang Zhang, Preetum Nakkiran, David Berthelot, Jiatao Gu, Huangjie Zheng, Tianrong Chen, Miguel Angel Bautista, Navdeep Jaitly, and Josh Susskind. Normalizing flows are capable generative models. arXiv preprint arXiv:2412.06329, 2024. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 183 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 316, + 72, + 553, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 126 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 126 + ], + "type": "text", + "content": "[69] Shilong Zhang, Wenbo Li, Shoufa Chen, Chongjian Ge, Peize Sun, Yida Zhang, Yi Jiang, Zehuan Yuan, Binyue Peng, and Ping Luo. Flashvideo: Flowing fidelity to detail for efficient high-resolution video generation. arXiv preprint arXiv:2502.05179, 2025. 
1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 129, + 553, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 553, + 183 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 553, + 183 + ], + "type": "text", + "content": "[70] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024. 7" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file