Chelsea707 committed
Commit 3a166a1 · verified · Parent: 6c93bde

MinerU Batch e87e1d9f-d17b-4b8a-bbe3-e5d8a7ae5e47 (Part 5/8)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +8 -0
  2. data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_content_list.json +0 -0
  3. data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_model.json +0 -0
  4. data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_origin.pdf +3 -0
  5. data/2025/2504_07xxx/2504.07951/full.md +650 -0
  6. data/2025/2504_07xxx/2504.07951/images/017cd4e44d6041fbf5d92d6449f51dc545d6432fffd337fa70b94d0dc159c6e0.jpg +3 -0
  7. data/2025/2504_07xxx/2504.07951/images/045203ceb8b4e4655a60480c74ed6b69e687bea09891aca71efb66fa919250c1.jpg +3 -0
  8. data/2025/2504_07xxx/2504.07951/images/0457adae4a061cd434da8b673b2881180bd66aaec1951b226848f1504508a81e.jpg +3 -0
  9. data/2025/2504_07xxx/2504.07951/images/05809573792c53ca70b02f70c994411ae668a3b5b2344a38b3d284b30400e43d.jpg +3 -0
  10. data/2025/2504_07xxx/2504.07951/images/091e908750bec32dc612b24b77318d4392a00579c8de5564bd1f4aaeac00cffe.jpg +3 -0
  11. data/2025/2504_07xxx/2504.07951/images/10dbbf3ec8073a52df28e88684bc9882d3a1ce36b6a9552bc7fb858ad7c9543e.jpg +3 -0
  12. data/2025/2504_07xxx/2504.07951/images/11479b86f8758143bec163b9b3f731a9a13ce7f8d8af1f77885578d6fc23d4d9.jpg +3 -0
  13. data/2025/2504_07xxx/2504.07951/images/1249bd8d2556f58d1f41ad80f0ef011fad2d11b1c4b68a40881aad72f6f4d8f5.jpg +3 -0
  14. data/2025/2504_07xxx/2504.07951/images/12a7acbd253e8fc8060bb23066911da65f40d736f2b8fbbf41ef5b64ea350b44.jpg +3 -0
  15. data/2025/2504_07xxx/2504.07951/images/130ab8d89bfbb0daec936aeeb6ae15603aaee2a3b5a736c530f7a7e8d6b259fe.jpg +3 -0
  16. data/2025/2504_07xxx/2504.07951/images/151c20de476ef01f160a8055c4f9468eef0286edcc9f11c199662da996e79625.jpg +3 -0
  17. data/2025/2504_07xxx/2504.07951/images/163a2ab479abcc17e9353749f796560b4c9b3868903e73a7bd4261351ac385ac.jpg +3 -0
  18. data/2025/2504_07xxx/2504.07951/images/170aa90031c86716a60fdb8bca8d1f9c6abc83012c624d6aa092ecd2d1457844.jpg +3 -0
  19. data/2025/2504_07xxx/2504.07951/images/19bb36aad451a1cd656099f17e52e909bfb31aefe24539cfdfbdde715fde3f60.jpg +3 -0
  20. data/2025/2504_07xxx/2504.07951/images/1f84b67b5d7cfa4cb69e5df7625489543ef35e1179c8f78db4afa37ad99ee036.jpg +3 -0
  21. data/2025/2504_07xxx/2504.07951/images/20c4e076762da2e42468513928ebe646d8ecfb3fe53aae03d0f904e8b15de96b.jpg +3 -0
  22. data/2025/2504_07xxx/2504.07951/images/20f8b6dd661ab17387d6361040a2dda735af8ad5b613ab0bc7dd00e0c1dfca59.jpg +3 -0
  23. data/2025/2504_07xxx/2504.07951/images/21e8fdc385d8e3156ac6fa8c201c6a66e47d61ac4cf92ccd51951f6d42c2f9ef.jpg +3 -0
  24. data/2025/2504_07xxx/2504.07951/images/2298a5bd5a365cb44cab1b103fb156142e541e4de0c91847da088ebeb6d5772b.jpg +3 -0
  25. data/2025/2504_07xxx/2504.07951/images/29d9735919725580929983c6cf0f1e57af47d8b28095af285fee7f7e08e14bfc.jpg +3 -0
  26. data/2025/2504_07xxx/2504.07951/images/2c3c0faf8744d893068c96328767e8b3cb6814d50ea95a957ae80e790bdaa99e.jpg +3 -0
  27. data/2025/2504_07xxx/2504.07951/images/30f0b77b47bad4996304ae3eb682ca659d3838eb5fdb97502507841b6fa9a450.jpg +3 -0
  28. data/2025/2504_07xxx/2504.07951/images/3158294d4bed882c5795c15d51f30187e0425323247a897fc1f73162cbb2ca5e.jpg +3 -0
  29. data/2025/2504_07xxx/2504.07951/images/318e688155ccd183d4e1428cd31f0225ef444eeaf637aece6b3e58c56e5d7812.jpg +3 -0
  30. data/2025/2504_07xxx/2504.07951/images/35b33667f30401245922f03ec33eabdee73856313eeb1047a34b45938190086e.jpg +3 -0
  31. data/2025/2504_07xxx/2504.07951/images/3608745d85340830d3714b391c6401e08ae575b7c2858fdf864af7cb9b124f87.jpg +3 -0
  32. data/2025/2504_07xxx/2504.07951/images/38e56edac0d3be48f9d017b5be0ecba6837647563e6fba8f0f05f1b2c5885b56.jpg +3 -0
  33. data/2025/2504_07xxx/2504.07951/images/3ae00f36a4a66bc014814831c85b16c3b2c789a86ea8281e5e74facae8c1ca14.jpg +3 -0
  34. data/2025/2504_07xxx/2504.07951/images/4133a9cbf5fb81e279a53a22c25cc8e1a31cad79eb8d7f96cf9c1d0cbaf75ebd.jpg +3 -0
  35. data/2025/2504_07xxx/2504.07951/images/45331be5dffd8bf1906423e01879dee5d3a816ec501329e152690ebd1e24e59f.jpg +3 -0
  36. data/2025/2504_07xxx/2504.07951/images/48ddff1697fd13cb0e2777d4878ca97b8b1c84373b2fd14b4c89423c97881f5b.jpg +3 -0
  37. data/2025/2504_07xxx/2504.07951/images/4940c0e3eb2f8c85622a677039121632355bd85038b5913c73fa6ec971513244.jpg +3 -0
  38. data/2025/2504_07xxx/2504.07951/images/4a1035b9a18a960ba7e2cd7aca5a49f2e04a75b6df481835ae8d4417804fcb2d.jpg +3 -0
  39. data/2025/2504_07xxx/2504.07951/images/4dde252ff0946067af23011ac2b4db6aa4011b6c34fbfbc6549057a599da712d.jpg +3 -0
  40. data/2025/2504_07xxx/2504.07951/images/4e86f0374868e29a9f27723f7ab7e8a000db7028e3af86023ac4513fa225732c.jpg +3 -0
  41. data/2025/2504_07xxx/2504.07951/images/58b8a90492785ea6eb48f59e69bdee2ab2ead0444b721b6d7b2d5a5ac625d566.jpg +3 -0
  42. data/2025/2504_07xxx/2504.07951/images/5c5d809252a6557e3554685ef21f3f3bb5c397b4474746975fe77434e16c52b3.jpg +3 -0
  43. data/2025/2504_07xxx/2504.07951/images/5d510b88dd42b8ddd6e4d6b000a38f3524b9b76ce933ceb6ce23cf3dbff52932.jpg +3 -0
  44. data/2025/2504_07xxx/2504.07951/images/5f8f59f429c09150c5fcb8b66558a02dcabcfe38634d5ce492da13a607485231.jpg +3 -0
  45. data/2025/2504_07xxx/2504.07951/images/62cf5e7b98b47d2792dc6b7fe326f249af87efb09ffa6f45b0a87b47d5481909.jpg +3 -0
  46. data/2025/2504_07xxx/2504.07951/images/675dbf1f4b9b9be1ca72b30b1a7af76972f55649829f8678333c939553f2b746.jpg +3 -0
  47. data/2025/2504_07xxx/2504.07951/images/6f61e1f3f53e3ddd36f12868abec144cf60c2677463e6e9ba5cb4b0e8e6d8985.jpg +3 -0
  48. data/2025/2504_07xxx/2504.07951/images/7229c0205223fd2165a40f38f790f64f89c1c7601693a992b739a3d796f80bf7.jpg +3 -0
  49. data/2025/2504_07xxx/2504.07951/images/7445330c28293501507c7a1de3b802c291b7e7d4af6025b3396fca03b97b7816.jpg +3 -0
  50. data/2025/2504_07xxx/2504.07951/images/768487d1cb2c067b0f6e62f53328b62a8037bbd733461c754d3ae55f1b154cd2.jpg +3 -0
.gitattributes CHANGED
@@ -1236,3 +1236,11 @@ data/2025/2504_08xxx/2504.08183/dd9ad541-c188-40b6-8fd4-ceae96bfad95_origin.pdf
1236
  data/2025/2504_08xxx/2504.08192/ef5ba100-581f-437d-8edc-97d27f723fa4_origin.pdf filter=lfs diff=lfs merge=lfs -text
1237
  data/2025/2504_08xxx/2504.08204/2b821f7b-0121-4811-b017-bc29978129ed_origin.pdf filter=lfs diff=lfs merge=lfs -text
1238
  data/2025/2504_08xxx/2504.08358/c8c54375-95ac-4560-8575-6646aab725d1_origin.pdf filter=lfs diff=lfs merge=lfs -text
1239
+ data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_origin.pdf filter=lfs diff=lfs merge=lfs -text
1240
+ data/2025/2504_07xxx/2504.07952/cb21f510-4f57-45bf-801e-d15a299a4bb4_origin.pdf filter=lfs diff=lfs merge=lfs -text
1241
+ data/2025/2504_07xxx/2504.07954/621d9657-2f17-466c-9c96-d67a0e66a5bf_origin.pdf filter=lfs diff=lfs merge=lfs -text
1242
+ data/2025/2504_07xxx/2504.07957/e7d6f608-881f-440b-b4c2-c90cf439fe0e_origin.pdf filter=lfs diff=lfs merge=lfs -text
1243
+ data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_origin.pdf filter=lfs diff=lfs merge=lfs -text
1244
+ data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_origin.pdf filter=lfs diff=lfs merge=lfs -text
1245
+ data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_origin.pdf filter=lfs diff=lfs merge=lfs -text
1246
+ data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_07xxx/2504.07951/942fad52-8833-45ad-88a5-5de4992284e8_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b847b7773af318736aca0b86d2cae77cfb60535aa01da672ef9577890d41dbf6
3
+ size 856853
data/2025/2504_07xxx/2504.07951/full.md ADDED
@@ -0,0 +1,650 @@
1
+ # Scaling Laws for Native Multimodal Models
2
+
3
+ Mustafa Shukor²
4
+
5
+ Enrico Fini
6
+
7
+ Victor Guilherme Turrisi da Costa¹
8
+
9
+ Matthieu Cord²
10
+
11
+ Joshua Susskind
12
+
13
+ Alaaeldin El-Nouby
14
+
15
+ ¹Apple
16
+
17
+ ²Sorbonne University
18
+
19
+ # Abstract
20
+
21
+ Building general-purpose models that can effectively perceive the world through multimodal signals has been a long-standing goal. Current approaches involve integrating separately pre-trained components, such as connecting vision encoders to LLMs and continuing multimodal training. While such approaches exhibit remarkable sample efficiency, it remains an open question whether such late-fusion architectures are inherently superior. In this work, we revisit the architectural design of native multimodal models (NMMs)-those trained from the ground up on all modalities—and conduct an extensive scaling laws study, spanning 457 trained models with different architectures and training mixtures. Our investigation reveals no inherent advantage to late-fusion architectures over early-fusion ones, which do not rely on image encoders or tokenizers. On the contrary, early-fusion exhibits stronger performance at lower parameter counts, is more efficient to train, and is easier to deploy. Motivated by the strong performance of the early-fusion architectures, we show that incorporating Mixture of Experts (MoEs) allows models to learn modality-specific weights, significantly benefiting performance.
22
+
23
+ # 1. Introduction
24
+
25
+ Multimodality provides a rich signal for perceiving and understanding the world. Advances in vision [23, 52, 55, 80] and language models [3, 19, 67] have enabled the development of powerful multimodal models that understand language, images, and audio. A common approach involves grafting separately pre-trained unimodal models, such as connecting a vision encoder to the input layer of an LLM [6, 9, 35, 43, 62, 64, 73, 78].
26
+
27
+ Although this seems like a convenient approach, it remains an open question whether such late-fusion strategies are inherently optimal for understanding multimodal signals. Moreover, with abundant multimodal data available, initializing from unimodal pre-training is potentially detrimental, as it may introduce biases that prevent the model
28
+
29
+ ![](images/4e86f0374868e29a9f27723f7ab7e8a000db7028e3af86023ac4513fa225732c.jpg)
30
+
31
+ ![](images/aa4d2e50304f1b35bc419434fa34759b233eea950df0d8cf73344bc441f1cd30.jpg)
34
+ Figure 1. Scaling properties of Native Multimodal Models. Based on the scaling laws study in § 3.1, we observe: (1) early and late fusion models provide similar validation loss $L$ when trained with the same compute budget $C$ (FLOPs); (2) This performance is achieved via a different trade-off between parameters $N$ and number of training tokens $D$ , where early-fusion models require fewer parameters. (3) Sparse early-fusion models achieve lower loss and require more training tokens for a given FLOP budget.
35
+
36
+ from fully leveraging cross-modality co-dependencies. An additional challenge is scaling such systems; each component (e.g., vision encoder, LLM) has its own set of hyperparameters, pre-training data mixtures, and scaling properties with respect to the amount of data and compute applied. A more flexible architecture might allow the model to dynamically allocate its capacity across modalities, simplifying scaling efforts.
37
+
38
+ In this work, we focus on the scaling properties of native multimodal models trained from the ground up on multimodal data. We first investigate whether the commonly adopted late-fusion architectures hold an intrinsic advantage by comparing them to early-fusion models, which process raw multimodal inputs without relying on dedicated vision encoders. We conduct scaling experiments on early and late fusion architectures, deriving scaling laws to predict their performance and compute-optimal configurations.
39
+
40
+ Our findings indicate that late fusion offers no inherent advantage when trained from scratch. Instead, early-fusion models are more efficient and are easier to scale. Furthermore, we observe that native multimodal models follow scaling laws similar to those of LLMs [26], albeit with slight variations in scaling coefficients across modalities and datasets. Our results suggest that model parameters and training tokens should be scaled roughly equally for optimal performance. Moreover, we find that different multimodal training mixtures exhibit similar overall trends, indicating that our findings are likely to generalize to a broader range of settings.
41
+
42
+ While our findings favor early fusion, multimodal data is inherently heterogeneous, suggesting that some degree of parameter specialization may still offer benefits. To investigate this, we explore leveraging Mixture of Experts (MoEs) [59], a technique that enables the model to dynamically allocate specialized parameters across modalities in a symmetric and parallel manner, in contrast to late-fusion models, which are asymmetric and process data sequentially. Training native multimodal models with MoEs results in significantly improved performance and, therefore, faster convergence. Our scaling laws for MoEs suggest that scaling the number of training tokens is more important than scaling the number of active parameters. This unbalanced scaling differs from what is observed for dense models, due to the higher number of total parameters in sparse models. In addition, our analysis reveals that experts tend to specialize in different modalities, with this specialization being particularly prominent in the early and last layers.
43
+
44
+ # 1.1. Summary of our findings
45
+
46
+ Our findings can be summarized as follows:
47
+
48
+ Native Early and Late fusion perform on par: Early fusion models trained from scratch perform on par with their late-fusion counterparts, with a slight advantage to early-fusion models for low compute budgets (Figure 3). Furthermore, our scaling laws study indicates that the compute-optimal models for early and late fusion perform similarly as the compute budget increases (Figure 1 Top).
49
+
50
+ NMMs scale similarly to LLMs: The scaling laws of native multimodal models follow similar laws as text-only LLMs with slightly varying scaling exponents depending on the target data type and training mixture (Table 2).
51
+
52
+ Late-fusion requires more parameters: Compute-optimal late-fusion models require a higher parameters-to-data ratio when compared to early-fusion (Figure 1 bottom).
53
+
54
+ Sparsity significantly benefits early-fusion NMMs: Sparse NMMs exhibit significant improvements compared to their dense counterparts at the same inference cost (Figure 10). Furthermore, they implicitly learn modality-specific weights when trained with sparsity (Figure 12).
55
+
56
+ <table><tr><td>Expression</td><td>Definition</td></tr><tr><td>N</td><td>Number of parameters in the multimodal decoder. For MoEs this refers to the active parameters only.</td></tr><tr><td>D</td><td>Total number of multimodal tokens.</td></tr><tr><td>Nv</td><td>Number of parameters in the vision-specific encoder. Only exists in late-fusion architectures.</td></tr><tr><td>Dv</td><td>Number of vision-only tokens.</td></tr><tr><td>C</td><td>Total number of FLOPs, estimated as C = 6ND for early-fusion and C = 6(NvDv + ND) for late-fusion.</td></tr><tr><td>L</td><td>Validation loss measured as the average over interleaved image-text, image-caption, and text-only data mixtures.</td></tr></table>
57
+
58
+ Table 1. Definitions of the expressions used throughout the paper.
59
+
60
+ In addition, compute-optimal models rely more on scaling the number of training tokens than the number of active parameters as the compute budget grows (Figure 1 Bottom).
61
+
62
+ Modality-agnostic routing beats Modality-aware routing for Sparse NMMs: Training sparse mixture of experts with modality-agnostic routing consistently outperforms models with modality-aware routing (Figure 11).
63
+
64
+ # 2. Preliminaries
65
+
66
+ # 2.1. Definitions
67
+
68
+ Native Multimodal Models (NMMs): Models that are trained from scratch on all modalities simultaneously without relying on pre-trained LLMs or vision encoders. Our focus is on the representative image and text modalities, where the model processes both text and images as input and generates text as output.
69
+
70
+ Early fusion: Enabling multimodal interaction from the beginning, using almost no modality-specific parameters (except a linear layer to patchify images). Using a single transformer model, this approach processes raw multimodal input—tokenized text and continuous image patches—with no image discretization. In this paper, we refer to the main transformer as the decoder.
71
+
72
+ Late fusion: Delaying the multimodal interaction to deeper layers, typically after separate unimodal components have processed each modality independently (e.g., a vision encoder connected to a decoder).
73
+
74
+ Modality-agnostic routing: In sparse mixture-of-experts, modality-agnostic routing refers to relying on a learned router module that is trained jointly with the model.
75
+
76
+ Modality-aware routing: Routing based on pre-defined rules, such as routing based on the modality type (e.g., vision tokens, text tokens).
77
+
78
+ # 2.2. Scaling Laws
79
+
80
+ We aim to understand the scaling properties of NMMs and how different architectural choices influence trade-offs. To this end, we analyze our models within the scaling laws framework proposed by Hoffmann et al. [26] and Kaplan et al. [31]. We compute FLOPs based on the total number of parameters, using the approximation $C = 6ND$ , as adopted in prior work [2, 26]. However, we modify this estimation to suit our setup: for late-fusion models, FLOPs are computed as $6(N_{v}D_{v} + ND)$ .
81
+
82
+ ![](images/849e76211de227d7564ee66c874ace735709b77d3978a5168211969bf19288d8.jpg)
83
+ Figure 2. Scaling laws for early-fusion and late-fusion native multimodal models. Each point represents a model (300M to 3B parameters) trained on varying number of tokens (250M to 400B). We report the average cross-entropy loss on the validation sets of interleaved (Obelics), Image-caption (HQITP), and text-only data (DCLM).
84
+
85
+ We consider a setup where, given a compute budget $C$ , our goal is to predict the model's final performance, as well as to determine the optimal number of parameters or number of training tokens. Consistent with prior studies on LLM scaling [26], we assume a power-law relationship between the final model loss and both model size $(N)$ and training tokens $(D)$ :
86
+
87
+ $$
88
+ L = E + \frac{A}{N^{\alpha}} + \frac{B}{D^{\beta}}. \tag{1}
89
+ $$
90
+
91
+ Here, $E$ represents the lowest achievable loss on the dataset, while $\frac{A}{N^{\alpha}}$ captures the effect of increasing the number of parameters, where a larger model leads to lower loss, with the rate of improvement governed by $\alpha$ . Similarly, $\frac{B}{D^{\beta}}$ accounts for the benefits of a higher number of tokens, with $\beta$ determining the rate of improvement. Additionally, we assume a linear relationship between compute budget (FLOPs) and both $N$ and $D$ ( $C \propto ND$ ). This further leads to power-law relationships detailed in Appendix C.7.
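
The relationship $C \propto ND$ makes the compute-optimal exponents easy to derive in closed form. The following is a compact sketch of the standard argument (detailed in Appendix C), using only Eq. 1 and $C = 6ND$:

$$
D = \frac{C}{6N} \;\Rightarrow\; L(N) = E + \frac{A}{N^{\alpha}} + B\left(\frac{6N}{C}\right)^{\beta}, \qquad \frac{\partial L}{\partial N}\bigg|_{C} = 0 \;\Rightarrow\; N_{opt} \propto C^{\frac{\beta}{\alpha+\beta}}, \quad D_{opt} \propto C^{\frac{\alpha}{\alpha+\beta}}.
$$

Plugging in the early-fusion estimates reported later ( $\alpha = 0.301$ , $\beta = 0.335$ ) gives exponents of roughly $0.53$ for $N_{opt}$ and $0.47$ for $D_{opt}$ , matching the fitted values in § 3.1 and Table 2.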
92
+
93
+ # 2.3. Experimental setup
94
+
95
+ Our models are based on the autoregressive transformer architecture [71] with SwiGLU FFNs [58] and QK-Norm [17] following Li et al. [39]. In early-fusion models, image patches are linearly projected to match the text token dimension, while late-fusion follows the CLIP architecture [55]. We adopt causal attention for text tokens and bidirectional attention for image tokens, which we found to work better. Training is conducted on a mixture of public and private multimodal datasets, including DCLM [39], Obelics [34], DFN [21], COYO [11], and a private collection of High-Quality Image-Text Pairs (HQITP). Images are resized to $224 \times 224$ resolution with a $14 \times 14$ patch size. We use a context length of 1k for the multimodal sequences. For training efficiency, we train our models with bfloat16, Fully Sharded Data Parallel (FSDP) [82], activation checkpointing, and gradient accumulation.
96
+
97
+ <table><tr><td colspan="2">L = E + A/Nα + B/Dβ</td><td colspan="2">N ∝ Ca</td><td colspan="2">D ∝Cb</td><td colspan="2">L ∝Cc</td><td colspan="2">D ∝Nd</td></tr><tr><td>Model</td><td>Data</td><td>E</td><td>α</td><td>β</td><td>a</td><td>b</td><td>c</td><td>d</td><td></td></tr><tr><td>GPT3 [10]</td><td>Text</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-0.048</td><td></td><td></td></tr><tr><td>Chinchilla [26]</td><td>Text</td><td>1.693</td><td>0.339</td><td>0.285</td><td>0.46</td><td>0.54</td><td>-</td><td></td><td></td></tr><tr><td rowspan="4">NMM (early-fusion)</td><td>Text</td><td>2.222</td><td>0.3084</td><td>0.3375</td><td>0.5246</td><td>0.4774</td><td>-0.0420</td><td>0.9085</td><td></td></tr><tr><td>Image-Caption</td><td>1.569</td><td>0.3111</td><td>0.3386</td><td>0.5203</td><td>0.4785</td><td>-0.0610</td><td>0.9187</td><td></td></tr><tr><td>Interleaved</td><td>1.966</td><td>0.2971</td><td>0.338</td><td>0.5315</td><td>0.4680</td><td>-0.0459</td><td>0.8791</td><td></td></tr><tr><td>AVG</td><td>1.904</td><td>0.301</td><td>0.335</td><td>0.5262</td><td>0.473</td><td>-0.0492</td><td>0.8987</td><td></td></tr><tr><td>NMM (late-fusion)</td><td>AVG</td><td>1.891</td><td>0.2903</td><td>0.3383</td><td>0.6358</td><td>0.4619</td><td>-0.0494</td><td>0.6732</td><td></td></tr><tr><td>Sparse NMM (early-fusion)</td><td>AVG</td><td>2.158</td><td>0.710</td><td>0.372</td><td>0.361</td><td>0.656</td><td>-0.047</td><td>1.797</td><td></td></tr></table>
98
+
99
+ Table 2. Scaling laws for native multimodal models. We report the scaling laws results for early and late fusion models. We fit the scaling laws for different target data types as well as their average loss (AVG).
100
+
101
+ We also use sequence packing for the image captioning dataset to reduce the amount of padded tokens. Similar to previous works [2, 5, 26], we evaluate performance on held-out subsets of interleaved (Obelics), image-caption (HQITP), and text-only (DCLM) data. Further implementation details are provided in Appendix A.
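
To make the early-fusion input pipeline concrete, the sketch below (illustrative only, not the training code; all names and shapes are assumptions) builds a mixed image-text sequence with a single linear patch projection and an attention mask that is causal over text tokens but bidirectional among image tokens:

```python
# Illustrative sketch of early-fusion input construction (assumed names/shapes, not the paper's code):
# text tokens come from an embedding table, image patches go through a single linear projection
# (no tokenizer, no discretization), and attention is causal over text but bidirectional among image tokens.
import numpy as np

d_model, vocab, patch_dim = 512, 1000, 14 * 14 * 3              # 14x14 RGB patches at 224px resolution
rng = np.random.default_rng(0)
tok_emb = rng.normal(scale=0.02, size=(vocab, d_model))         # text embedding table
patch_proj = rng.normal(scale=0.02, size=(patch_dim, d_model))  # the single modality-specific layer

def build_inputs(text_ids, patches):
    """text_ids: (T,) int token ids; patches: (P, patch_dim) raw image patches."""
    img_x = patches @ patch_proj                   # (P, d_model), continuous patches
    txt_x = tok_emb[text_ids]                      # (T, d_model)
    x = np.concatenate([img_x, txt_x], axis=0)     # one sequence for a single decoder
    is_image = np.concatenate([np.ones(len(img_x), bool), np.zeros(len(txt_x), bool)])
    return x, is_image

def attention_mask(is_image):
    """True = attention allowed: causal everywhere, plus full attention within the image span."""
    n = len(is_image)
    causal = np.tril(np.ones((n, n), dtype=bool))
    bidir_img = is_image[:, None] & is_image[None, :]
    return causal | bidir_img

x, is_image = build_inputs(np.array([5, 17, 42]), rng.normal(size=(4, patch_dim)))
print(x.shape, attention_mask(is_image).astype(int))
```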
102
+
103
+ # 3. Scaling native multimodal models
104
+
105
+ In this section, we present a scaling laws study of native multimodal models, examining various architectural choices § 3.1, exploring different data mixtures § 3.2, analyzing the practical trade-offs between late and early fusion NMMs, and comparing the performance of native pretraining and continual pre-training of NMMs § 3.3.
106
+
107
+ Setup. We train models ranging from 0.3B to 4B active parameters, scaling the width while keeping the depth constant. For smaller training token budgets, we reduce the warm-up phase to 1K steps while maintaining 5K steps for larger budgets. Following Hagele et al. [25], models are trained with a constant learning rate, followed by a cooldown phase using an inverse square root scheduler. The cool-down phase spans $20\%$ of the total steps spent at the constant learning rate. To estimate the scaling coefficients in Eq 1, we apply the L-BFGS algorithm [51] and Huber loss [28] (with $\delta = 10^{-3}$ ), performing a grid search over initialization ranges.
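
For concreteness, a minimal sketch of this fitting procedure is shown below (not the authors' code; the toy data and initialization grid are illustrative assumptions). It applies a Huber loss with $\delta = 10^{-3}$ to log-loss residuals, optimizes with L-BFGS, and keeps the best of several initializations:

```python
# Minimal sketch of fitting Eq. (1) with a Huber loss (delta = 1e-3) and L-BFGS
# (not the authors' code; the toy data and initialization grid below are illustrative assumptions).
import itertools
import numpy as np
from scipy.optimize import minimize

def huber(r, delta=1e-3):
    a = np.abs(r)
    return np.where(a <= delta, 0.5 * r**2, delta * (a - 0.5 * delta))

def fit_scaling_law(N, D, L):
    """N, D, L: arrays of parameter counts, training tokens, and final validation losses."""
    logN, logD, logL = np.log(N), np.log(D), np.log(L)

    def objective(theta):
        e, a, b, alpha, beta = theta               # e = log E, a = log A, b = log B
        pred = np.exp(e) + np.exp(a - alpha * logN) + np.exp(b - beta * logD)
        return huber(np.log(pred) - logL).sum()    # Huber on log-loss residuals

    best = None
    grid = itertools.product([0.0, 0.5, 1.0], [2.0, 5.0], [2.0, 5.0], [0.2, 0.4], [0.2, 0.4])
    for x0 in grid:                                # grid search over initialization ranges
        res = minimize(objective, np.array(x0), method="L-BFGS-B")
        if best is None or res.fun < best.fun:
            best = res
    e, a, b, alpha, beta = best.x
    return dict(E=np.exp(e), A=np.exp(a), B=np.exp(b), alpha=alpha, beta=beta)

# toy usage with synthetic points (the actual fits use the grid of trained models)
N = np.array([3e8, 7e8, 1e9, 3e9]); D = np.array([1e10, 5e10, 1e11, 4e11])
L = 1.9 + 2e2 / N**0.3 + 3e3 / D**0.34
print(fit_scaling_law(N, D, L))
```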
108
+
109
+ # 3.1. Scaling laws of NMMs
110
+
111
+ Scaling laws for early-fusion and late-fusion models. Figure 2 (left) presents the final loss averaged across interleaved, image-caption, and text datasets for early-fusion NMMs. The lowest-loss frontier follows a power law as a function of FLOPs. Fitting the power law yields the expression $L \propto C^{-0.049}$ , indicating the rate of improvement with increasing compute. When analyzing the scaling laws per data type (e.g., image-caption, interleaved, text), we observe that the exponent varies (Table 2). For instance, the model achieves a higher rate of improvement for image-
112
+
113
+ ![](images/dc53bc628df5feb99518e025ca084ba7b9428638cf809cc0d413eaca42641103.jpg)
114
+
115
+ ![](images/87949d3241155096a62e02bbef0b483bc4abd62f02b7c47214c9da40da229f94.jpg)
116
+
117
+ ![](images/ea002e02fed450556ae2ce5afb3686df9c86c34add67acb163233c6b6af4e322.jpg)
118
+
119
+ ![](images/4133a9cbf5fb81e279a53a22c25cc8e1a31cad79eb8d7f96cf9c1d0cbaf75ebd.jpg)
120
+ Figure 3. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the number of model parameters and the number of training tokens. Overall, early fusion shows a slight advantage, especially at smaller model sizes, and the gap decreases when scaling the number of parameters $N$ .
121
+
122
+ caption data $(L\propto C^{-0.061})$ when compared to interleaved documents $(L\propto C^{-0.046})$.
123
+
124
+ To model the loss as a function of the number of training tokens $D$ and model parameters $N$ , we fit the parametric function in Eq 1, obtaining scaling exponents $\alpha = 0.301$ and $\beta = 0.335$ . These describe the rates of improvement when scaling the number of model parameters and training tokens, respectively. Assuming a linear relationship between compute, $N$ , and $D$ (i.e., $C \propto ND$ ), we derive the law relating model parameters to the compute budget (see Appendix C for details). Specifically, for a given compute budget $C$ , we compute the corresponding model size $N$ at logarithmically spaced $D$ values and determine $N_{opt}$ , the parameter count that minimizes loss. Repeating this across different FLOPs values produces a dataset of $(C, N_{opt})$ , to which we fit a power law predicting the compute-optimal model size as a function of compute: $N^{*} \propto C^{0.526}$ .
125
+
126
+ Similarly, we fit power laws to estimate the compute-optimal training dataset size as a function of compute and model size:
127
+
128
+ $$
129
+ D_{opt} \propto C^{0.473}, \qquad D_{opt} \propto N^{0.899}.
130
+ $$
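
A small numerical sketch of this sweep (with assumed placeholder constants for $E$ , $A$ , and $B$ , not the fitted values) recovers the expected exponent:

```python
# Numerical sketch of the compute-optimal sweep (placeholder constants, not the fitted values):
# for each budget C, evaluate the fitted loss on a log-spaced grid of D with N = C / (6 D),
# keep the minimizer, then regress log N_opt on log C to read off the exponent of N_opt ∝ C^a.
import numpy as np

E, A, B, alpha, beta = 1.904, 29.0, 3000.0, 0.301, 0.335   # assumed coefficients of Eq. (1)

def loss(N, D):
    return E + A / N**alpha + B / D**beta

Cs = np.logspace(19, 23, 20)                     # FLOP budgets
N_opt = []
for C in Cs:
    D = np.logspace(8, 15, 4000)                 # candidate token counts
    N = C / (6 * D)                              # C = 6 N D  =>  N = C / (6 D)
    N_opt.append(N[np.argmin(loss(N, D))])

a, _ = np.polyfit(np.log(Cs), np.log(N_opt), 1)  # slope of the power-law fit
print(f"fitted exponent a ≈ {a:.3f}")            # ≈ beta / (alpha + beta) ≈ 0.53
```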
131
+
132
+ These relationships allow practitioners to determine the optimal model and dataset size given a fixed compute budget. When analyzing by data type, we find that interleaved data benefits more from larger models ( $a = 0.532$ ) compared to image-caption data ( $a = 0.520$ ), whereas the opposite trend holds for training tokens.
133
+
134
+ We conduct a similar study on late-fusion models in Figure 2 (right) and observe comparable scaling behaviors. In particular, the loss scaling exponent $(c = -0.0494)$ is nearly identical to that of early fusion $(c = -0.0492)$ . This trend is evident in Figure 3, where early fusion outperforms late fusion at smaller model scales, while both architectures converge to similar performance at larger model sizes. We also observe similar trends when varying late-fusion
135
+
136
+ ![](images/e2db9e4bc14538474e23bf3e8a07a771abc4e4bc5ddfb8611c4bfdfe5adf2479.jpg)
137
+ Figure 4. Early vs late: pretraining efficiency. Early-fusion is faster to train and consumes less memory. Models are trained on 16 H100 GPUs for 160k steps (300B tokens).
138
+
139
+ ![](images/7f7757b2858d9f840bbcacbd8c46970472f92633800743ce8acb9dab1d537e65.jpg)
140
+
141
+ configurations, such as using a smaller vision encoder with a larger text decoder (Appendix B).
142
+
143
+ Scaling laws of NMMs vs LLMs. Upon comparing the scaling law coefficients of our NMMs to those reported for text-only LLMs (e.g., GPT-3, Chinchilla), we find them to be within similar ranges. In particular, for predicting the loss as a function of compute, GPT-3 [10] follows $L \propto C^{-0.048}$ , while our models follow $L \propto C^{-0.049}$ , suggesting that the performance of NMMs adheres to similar scaling laws as LLMs. Similarly, our estimates of the $\alpha$ and $\beta$ parameters in Eq 1 ( $\alpha = 0.301$ , $\beta = 0.335$ ) closely match those reported by Hoffmann et al. [26] ( $\alpha = 0.339$ , $\beta = 0.285$ ). Likewise, our computed values of $a = 0.526$ and $b = 0.473$ align closely with $a = 0.46$ and $b = 0.54$ from [26], reinforcing the idea that, for native multimodal models, the number of training tokens and model parameters should be scaled proportionally. However, since the gap between $a$ and $b$ is smaller than in LLMs, this principle holds even more strongly for NMMs. Additionally, as $a = 0.526$ is greater than $b = 0.473$ in our case, the optimal model size for NMMs is larger than that of LLMs,
144
+
145
+ ![](images/2c3c0faf8744d893068c96328767e8b3cb6814d50ea95a957ae80e790bdaa99e.jpg)
146
+ Figure 5. Scaling laws with different training mixtures. Early-fusion models follow similar scaling trends when changing the pretraining mixtures. However, increasing the image captions leads to a higher scaling exponent norm (see Table 3).
147
+
148
+ <table><tr><td></td><td>C-I-T (%)</td><td>I/T ratio</td><td>E</td><td>α</td><td>β</td><td>a</td><td>b</td><td>d</td><td>c</td></tr><tr><td>1</td><td>45-45-10</td><td>1.19</td><td>1.906</td><td>0.301</td><td>0.335</td><td>0.527</td><td>0.474</td><td>0.901</td><td>-0.0492</td></tr><tr><td>2</td><td>40-20-40</td><td>0.65</td><td>1.965</td><td>0.328</td><td>0.348</td><td>0.518</td><td>0.486</td><td>0.937</td><td>-0.0486</td></tr><tr><td>3</td><td>30-30-40</td><td>0.59</td><td>1.847</td><td>0.253</td><td>0.338</td><td>0.572</td><td>0.428</td><td>0.748</td><td>-0.0463</td></tr><tr><td>4</td><td>20-40-40</td><td>0.49</td><td>1.836</td><td>0.259</td><td>0.354</td><td>0.582</td><td>0.423</td><td>0.726</td><td>-0.0488</td></tr></table>
149
+
150
+ Table 3. Scaling laws for different training mixtures (early-fusion models). C-I-T refers to the image-caption, interleaved, and text data proportions.
151
+
152
+ while the optimal number of training tokens is lower, given a fixed compute budget.
153
+
154
+ Compute-optimal trade-offs for early vs. late fusion NMMs. While late- and early-fusion models reduce loss at similar rates with increasing FLOPs, we observe distinct trade-offs in their compute-optimal models. Specifically, $N_{opt}$ is larger for late-fusion models, whereas $D_{opt}$ is larger for early-fusion models. This indicates that, given a fixed compute budget, late-fusion models require a higher number of parameters, while early-fusion models benefit more from a higher number of training tokens. This trend is also reflected in the lower $\frac{N_{opt}}{D_{opt}} \propto C^{0.053}$ for early fusion compared to $\frac{N_{opt}}{D_{opt}} \propto C^{0.076}$ for late fusion. As shown in Figure 1 (bottom), when scaling FLOPs, the number of parameters of early fusion models becomes significantly lower, which is crucial for reducing inference costs and, consequently, lowering serving costs after deployment.
155
+
156
+ Early-fusion is more efficient to train. We compare the training efficiency of late- and early-fusion architectures. As shown in Figure 4, early-fusion models consume less memory and train faster under the same compute budget. This advantage becomes even more pronounced as compute increases, highlighting the superior training efficiency of early fusion while maintaining comparable performance to late fusion at scale. Notably, for the same FLOPs, late-fusion models have a higher parameter count and higher effective depth (i.e., additional vision encoder layers alongside decoder layers) compared to early-fusion models.
157
+
158
+ ![](images/20c4e076762da2e42468513928ebe646d8ecfb3fe53aae03d0f904e8b15de96b.jpg)
159
+ Figure 7. Early vs late fusion: changing the training mixture. We vary the training mixtures and plot the final training loss. Early fusion models attain a favorable performance when increasing the proportion of interleaved documents and text-only data.
160
+
161
+ ![](images/30f0b77b47bad4996304ae3eb682ca659d3838eb5fdb97502507841b6fa9a450.jpg)
162
+
163
+ # 3.2. Scaling laws for different data mixtures
164
+
165
+ We investigate how variations in the training mixture affect the scaling laws of native multimodal models. To this end, we study four different mixtures that reflect common community practices [34, 41, 46, 81], with Image Caption-Interleaved-Text ratios of 45-45-10 (our default setup), 30-30-40, 40-20-40, and 20-40-40. For each mixture, we conduct a separate scaling study by training 76 different models, following our setup in § 3.1. Overall, Figure 5 shows that different mixtures follow similar scaling trends; however, the scaling coefficients vary depending on the mixture (Table 3). Interestingly, increasing the proportion of image-caption data (mixtures 1 and 2) leads to lower $a$ and higher $b$ , whereas increasing the ratio of interleaved and text data (mixtures 3 and 4) has the opposite effect. Notably, image-caption data contains more image tokens than text tokens; therefore, increasing its proportion results in more image tokens, while increasing interleaved and text data increases text token counts. This suggests that, when image tokens are prevalent, training for longer decreases the loss faster than increasing the model size. We also find that, for a fixed model size, increasing the text-only and interleaved data ratio favors early fusion (Figure 7).
166
+
167
+ ![](images/38e56edac0d3be48f9d017b5be0ecba6837647563e6fba8f0f05f1b2c5885b56.jpg)
168
+ Figure 8. Early native vs initializing from LLMs: initializing from pre-trained models and scaling training tokens. We compare training with and without initializing from DCLM-1B.
169
+
170
+ # 3.3. Native multimodal pre-training vs. continual training of LLMs
171
+
172
+ In this section, we compare training natively from scratch to continual training after initializing from a pre-trained LLM. We initialize the model from DCLM-1B [21], which is trained on more than 2T tokens. Figure 8 shows that native multimodal models can close the gap with initialized models when trained for longer. Specifically, on image captioning data, the model requires fewer than 100B multimodal tokens to reach comparable performance. However, on interleaved and text data, the model may need longer training—up to 1T tokens. Considering the cost of pre-training, these results suggest that training natively could be a more efficient approach for achieving the same performance on multimodal benchmarks.
173
+
174
+ # 4. Towards multimodal specialization
175
+
176
+ Previously, we demonstrated that early-fusion models achieve performance on par with late-fusion models under a fixed compute budget. However, multimodal data is inherently heterogeneous, and training a unified model to fit such diverse distributions may be suboptimal. Here, we argue for multimodal specialization within a unified architecture. Ideally, the model should implicitly adapt to each modality, for instance, by learning modality-specific weights or specialized experts. Mixture of Experts is a strong candidate for this approach, having demonstrated effectiveness in LLMs. In this section, we highlight the advantages of sparse early-fusion models over their dense counterparts.
177
+
178
+ Setup. Our sparse models are based on the dropless-MoE implementation of Gale et al. [24], which eliminates token dropping during training caused by expert capacity constraints. We employ a top- $k$ expert-choice routing mechanism, where each token selects its top- $k$ experts among the $E$ available experts. Specifically, we set $k = 1$ and $E = 8$ , as we find this configuration to work effectively. Additionally, we incorporate an auxiliary load-balancing loss [59] with a weight of 0.01 to ensure a balanced expert utilization.
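
As an illustration of this setup, the sketch below (a simplified stand-in, not the dropless MegaBlocks implementation) routes each token to its top-1 expert out of $E = 8$ and computes a Switch-style load-balancing loss scaled by 0.01; all module shapes and the plain ReLU FFN are assumptions:

```python
# Simplified sketch of top-1 routing over E = 8 experts with a Switch-style load-balancing
# auxiliary loss weighted by 0.01 (illustrative stand-in, not the dropless MegaBlocks kernels).
import numpy as np

E_experts, d_model, d_ff = 8, 512, 2048
rng = np.random.default_rng(0)
router_w = rng.normal(scale=0.02, size=(d_model, E_experts))
experts = [(rng.normal(scale=0.02, size=(d_model, d_ff)),
            rng.normal(scale=0.02, size=(d_ff, d_model))) for _ in range(E_experts)]

def moe_layer(x, aux_weight=0.01):
    """x: (tokens, d_model). Each token is processed by its top-1 expert; no tokens are dropped."""
    logits = x @ router_w                             # (tokens, E)
    probs = np.exp(logits - logits.max(-1, keepdims=True))
    probs /= probs.sum(-1, keepdims=True)
    assignment = probs.argmax(-1)                     # top-1 expert per token

    out = np.zeros_like(x)
    for e, (w1, w2) in enumerate(experts):
        idx = np.where(assignment == e)[0]
        if idx.size:                                  # no capacity limit -> "dropless"
            h = np.maximum(x[idx] @ w1, 0.0)          # plain ReLU FFN as a stand-in for SwiGLU
            out[idx] = (h @ w2) * probs[idx, e:e + 1] # scale by the router probability

    # load-balancing loss: E * sum_e (fraction of tokens routed to e) * (mean router prob of e)
    frac = np.bincount(assignment, minlength=E_experts) / len(x)
    aux_loss = aux_weight * E_experts * np.sum(frac * probs.mean(0))
    return out, aux_loss

y, aux = moe_layer(rng.normal(size=(16, d_model)))
print(y.shape, aux)
```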
179
+
180
+ ![](images/869818728d380a70e41fbd45b2de162b41240502115758a76d23bbc20a513422.jpg)
181
+ Figure 9. Scaling laws for sparse early-fusion NMMs. We report the final validation loss averaged across interleaved, image-captions and text data.
182
+
183
+ Following Abnar et al. [2], we compute training FLOPs as $6ND$ , where $N$ represents the number of active parameters.
184
+
185
+ # 4.1. Sparse vs dense NMMs when scaling FLOPs
186
+
187
+ We compare sparse MoE models to their dense counterparts by training models with different numbers of active parameters and varying amounts of training tokens. Figure 10 shows that, under the same inference cost (or number of active parameters), MoEs significantly outperform dense models. Interestingly, this performance gap is more pronounced for smaller model sizes. This suggests that MoEs enable models to handle heterogeneous data more effectively and specialize in different modalities. However, as dense models become sufficiently large, the gap between the two architectures gradually closes.
188
+
189
+ # 4.2. Scaling laws for sparse early-fusion models
190
+
191
+ We train different models (ranging from 300M to 3.4B active parameters) on varying amounts of tokens (ranging from 250M to 600B) and report the final loss in Figure 9. We fit a power law to the convex hull of the lowest loss as a function of compute (FLOPs). Interestingly, the exponent $(-0.048)$ is close to that of dense NMMs $(-0.049)$ , indicating that both architectures scale similarly. However, the multiplicative constant is smaller for MoEs (27.086) compared to dense models (29.574), revealing lower loss. Additionally, MoEs require longer training to reach saturation compared to dense models (see Appendix C for more details). We also predict the coefficients of Eq 1 by considering $N$ as the number of active parameters. Table 2 shows a significantly higher $\alpha$ compared to dense models. Interestingly, $b$ is significantly higher than $a$ , revealing that training tokens should be scaled at a higher rate than the number of parameters when training sparse NMMs. We also experiment with a scaling law that takes sparsity into account [2] and reach similar conclusions (Appendix C.7).
192
+
193
+ # 4.3. Modality-aware vs. Modality-agnostic routing
194
+
195
+ An alternative to modality-agnostic routing is modality-aware routing, where multimodal tokens are assigned to experts based on
196
+
197
+ ![](images/3608745d85340830d3714b391c6401e08ae575b7c2858fdf864af7cb9b124f87.jpg)
198
+ Figure 10. MoE vs Dense: scaling training FLOPs. We compare MoE and dense early-fusion models when scaling both the amount of training tokens and model sizes. MoEs beat dense models when matching the number of active parameters.
199
+
200
+ their modalities, similar to previous works [7, 75]. We train models with distinct image and text experts in the form of FFNs, where image tokens are processed only by the image FFN and text tokens only by the text FFN. Compared to modality-aware routing, MoEs exhibit significantly better performance on both image-caption and interleaved data as presented in Figure 11.
201
+
202
+ # 4.4. Emergence of expert specialization and sharing
203
+
204
+ We investigate multimodal specialization in MoE architectures. In Figure 13, we visualize the normalized number of text and image tokens assigned to each expert across layers. To quantify this specialization, we compute a specialization score, defined as the average, across all experts within a layer, of $1 - H(p)$ , where $H$ is the binary entropy of each expert's text/image token distribution. We plot this specialization score in Figure 12. Higher specialization scores indicate a tendency for experts to focus on either text or image tokens, while lower scores indicate a shared behavior. These visualizations provide clear evidence of modality-specific experts, particularly in the early layers. Furthermore, the specialization score decreases as the number of layers increases, before rising again in the last layers. This suggests that early and final layers exhibit higher modality specialization compared to mid-layers. This behavior is intuitive, as middle layers are expected to hold higher-level features that may generalize across modalities, and it is consistent with findings in [61] that show increasing alignment between modalities across layers. The emergence of both expert specialization and cross-modality sharing in our modality-agnostic MoE suggests that it may be a preferable approach compared to modality-aware sparsity. All data displayed here is from an early-fusion MoE model with 1B active parameters trained for 300B tokens.
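
The score can be computed directly from per-expert routing counts; the snippet below is a minimal sketch with made-up counts (in practice, counts come from the router assignments, and one may first normalize token counts per modality, as in Figure 13):

```python
# Minimal sketch of the per-layer specialization score with made-up routing counts
# (in practice, counts would be taken from the router assignments on Obelics / HQITP).
import numpy as np

def binary_entropy(p):
    p = np.clip(p, 1e-12, 1 - 1e-12)
    return -(p * np.log2(p) + (1 - p) * np.log2(1 - p))

def specialization_score(text_counts, image_counts):
    """Average over a layer's experts of 1 - H(p), with p the expert's text fraction."""
    text_counts = np.asarray(text_counts, dtype=float)
    image_counts = np.asarray(image_counts, dtype=float)
    p_text = text_counts / (text_counts + image_counts)
    return float(np.mean(1.0 - binary_entropy(p_text)))

# e.g., one mostly-image expert, one mostly-text expert, and two shared experts
print(specialization_score(text_counts=[10, 950, 480, 520],
                           image_counts=[990, 50, 520, 480]))   # ≈ 0.41
```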
205
+
206
+ <table><tr><td></td><td colspan="6">Accuracy</td><td colspan="2">CIDEr</td></tr><tr><td></td><td>AVG</td><td>VQAv2</td><td>TextVQA</td><td>OKVQA</td><td>GQA</td><td>VizWiz</td><td>COCO</td><td>TextCaps</td></tr><tr><td>Late-fusion</td><td>46.8</td><td>69.4</td><td>25.8</td><td>50.1</td><td>65.8</td><td>22.8</td><td>70.7</td><td>50.9</td></tr><tr><td>Early-fusion</td><td>47.6</td><td>69.3</td><td>28.1</td><td>52.1</td><td>65.4</td><td>23.2</td><td>72.0</td><td>53.8</td></tr><tr><td>Early-MoEs</td><td>48.2</td><td>69.8</td><td>30.0</td><td>52.1</td><td>65.4</td><td>23.6</td><td>69.6</td><td>55.7</td></tr></table>
207
+
208
+ Table 4. Supervised finetuning on the LLaVA mixture. All models are native at 1.5B scale and pre-trained on 300B tokens.
209
+
210
+ ![](images/df21bd1e747a0e8383b76d796688d703c2e2f383b9eba46b54ccd05bbe718f73.jpg)
211
+
212
+ ![](images/6f61e1f3f53e3ddd36f12868abec144cf60c2677463e6e9ba5cb4b0e8e6d8985.jpg)
213
+
214
+ ![](images/c3de713136a78894e21369ded443a32da1ce3d11b7bf70df27077caf95a41978.jpg)
215
+ Figure 11. Modality-aware vs. modality-agnostic routing for sparse NMMs. We compare modality-agnostic routing with modality-aware routing when scaling both the amount of training tokens and model sizes.
216
+
217
+ # 5. Evaluation on downstream tasks with SFT
218
+
219
+ Following previous work on scaling laws, we primarily rely on validation losses. However, we generally find that this evaluation correlates well with performance on downstream tasks. To validate this, we conduct a multimodal instruction tuning stage (SFT) on the LLaVA mixture [43] and report accuracy and CIDEr scores across several VQA and captioning tasks. Table 4 confirms the ranking of different model configurations. Specifically, early fusion outperforms late fusion, and MoEs outperform dense models. However, since the models are relatively small (1.5B scale), trained from scratch, and fine-tuned on a small dataset, the overall scores are lower than the current state of the art. Further implementation details can be found in Appendix A.
220
+
221
+ # 6. Related work
222
+
223
+ Large multimodal models. A long-standing research goal has been to develop models capable of perceiving the world through multiple modalities, akin to human sensory experience. Recent progress in vision and language processing has shifted the research focus from smaller, task-specific models toward large, generalist models that can handle diverse inputs [29, 67]. Crucially, pre-trained vision and language backbones often require surprisingly little adaptation to enable effective cross-modal communication [32, 47, 62, 68, 69]. Simply integrating a vision encoder with either an encoder-decoder architecture [45, 48, 63, 72]
224
+
225
+ ![](images/4a1035b9a18a960ba7e2cd7aca5a49f2e04a75b6df481835ae8d4417804fcb2d.jpg)
226
+ Figure 12. MoE specialization score. Entropy-based image/text specialization score (as described in § 4.4) across layers for two data sources: HQITP and Obelics. HQITP has a more imbalanced image-to-text token distribution, resulting in generally higher specialization. Despite this difference, both data sources exhibit a similar trend: the specialization score decreases in the early layers before increasing again in the final layers.
227
+
228
+ or a decoder-only LLM has yielded highly capable multimodal systems [1, 6, 9, 13, 16, 35, 43, 49, 64, 73, 78, 83]. This late-fusion approach, where modalities are processed separately before being combined, is now well-understood, with established best practices for training effective models [34, 41, 46, 81]. In contrast, early-fusion models [8, 18, 66], which combine modalities at an earlier stage, remain relatively unexplored, with only a limited number of publicly released models [8, 18]. Unlike [18, 66], our models utilize only a single linear layer and rely exclusively on a next-token prediction loss. Furthermore, we train our models from scratch on all modalities without image tokenization.
229
+
230
+ Native Multimodal Models. We define native multimodal models as those trained from scratch on all modalities simultaneously [67] rather than adapting LLMs to accommodate additional modalities. Due to the high cost of training such models, they remain relatively underexplored, with most relying on late-fusion architectures [27, 79]. Some multimodal models trained from scratch [4, 66, 76] relax this constraint by utilizing pre-trained image tokenizers such as [20, 70] to convert images into discrete tokens, integrating them into the text vocabulary. This approach enables models to understand and generate text and images, facilitating a more seamless multimodal learning process.
231
+
232
+ Scaling laws. Scaling law studies aim to predict how model performance scales with training compute. Early works [26, 31] found that LLM performance follows a power-law relationship with compute, enabling the compute-optimal estimation of the number of model parameters and training tokens at scale for a given budget. Similar research has extended these findings to sparse Mixture of Experts (MoE) models, considering factors such as sparsity, number of experts, and routing granularity [15, 33, 74]. Scaling laws have also been observed across various domains, including image models [23], video models [56], protein LLMs [14], and imitation learning [54].
233
+
234
+ ![](images/f3ebdfb256272f381112555508c27704cd3e60fe5c08b203fb55b9fc64a1a634.jpg)
235
+ Figure 13. MoE specialization frequency. Percentage of text and image tokens routed to each expert on interleaved data from Obelics. Experts are ordered for better visualization. The first layer shows the highest amount of unimodal experts.
236
+
237
+ ![](images/21e8fdc385d8e3156ac6fa8c201c6a66e47d61ac4cf92ccd51951f6d42c2f9ef.jpg)
238
+
239
+ ![](images/5f8f59f429c09150c5fcb8b66558a02dcabcfe38634d5ce492da13a607485231.jpg)
240
+
241
+ However, few studies have investigated scaling laws for multimodal models. Notably, Aghajanyan et al. [5] examined multimodal models that tokenize modalities into discrete tokens and include multimodal generation. In contrast, we focus on studying early-fusion models that take raw multimodal inputs and are trained on interleaved multimodal data.
242
+
243
+ Mixture of experts (MoEs). MoEs [59] scale model capacity efficiently by sparsely activating parameters, enabling large models with reduced per-sample compute. While widely studied in LLMs [22, 30, 36, 37, 42, 65, 77, 84], MoEs remain underexplored in multimodal settings. Prior work has examined contrastive models [50], late-fusion LLMs [38, 40], and modality-specific experts [7, 12, 60]. We focus on analyzing MoEs in early-fusion multimodal models.
244
+
245
+ # 7. Limitations
246
+
247
+ Our study finds that scaling law coefficients are broadly consistent across training mixtures, though a broader exploration is needed to validate this observation. While validation loss scales predictably with compute, the extent to which this correlates with downstream performance remains unclear and warrants further investigation. The accuracy of scaling law predictions improves with higher FLOPs, but their extrapolation to extreme model sizes is still an open question (Appendix D for more details).
248
+
249
+ # 8. Conclusion
250
+
251
+ We explore various strategies for compute-optimal pretraining of native multimodal models. We find that NMMs follow scaling laws similar to those of LLMs. Contrary to common belief, we find no inherent advantage in adopting late-fusion architectures over early-fusion ones. While both architectures exhibit similar scaling properties, early-fusion models are more efficient to train and outperform late-fusion models at lower compute budgets. Furthermore, we show that sparse architectures encourage modality-specific specialization, leading to performance improvements while maintaining the same inference cost.
252
+
253
+ # Acknowledgment
254
+
255
+ We thank Philipp Dufter, Samira Abnar, Xiujun Li, Zhe Gan, Alexander Toshev, Yinfei Yang, Dan Busbridge, and Jason Ramapuram for many fruitful discussions. We thank Denise Hui and Samy Bengio for infra and compute support. Finally, we thank Louis Bethune, Pierre Ablin, Marco Cuturi, and the MLR team at Apple for their support throughout the project.
256
+
257
+ # References
258
+
259
+ [1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024. 8
260
+ [2] Samira Abnar, Harshay Shah, Dan Busbridge, Alaaeldin Mohamed Elnouby Ali, Josh Susskind, and Vimal Thilak. Parameters vs flops: Scaling laws for optimal sparsity for mixture-of-experts language models. arXiv preprint arXiv:2501.12370, 2025. 2, 3, 6, 18, 20
261
+ [3] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1
262
+ [4] Armen Aghajanyan, Bernie Huang, Candace Ross, Vladimir Karpukhin, Hu Xu, Naman Goyal, Dmytro Okhonko, Mandar Joshi, Gargi Ghosh, Mike Lewis, et al. Cm3: A causal masked multimodal model of the internet. arXiv preprint arXiv:2201.07520, 2022. 8
263
+ [5] Armen Aghajanyan, Lili Yu, Alexis Conneau, Wei-Ning Hsu, Karen Hambardzumyan, Susan Zhang, Stephen Roller, Naman Goyal, Omer Levy, and Luke Zettlemoyer. Scaling laws for generative mixed-modal language models. In International Conference on Machine Learning, pages 265-279. PMLR, 2023. 3, 8
264
+ [6] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. 1, 8
265
+ [7] Hangbo Bao, Wenhui Wang, Li Dong, Qiang Liu, Owais Khan Mohammed, Kriti Aggarwal, Subhojit Som, and Furu Wei. Vlmo: Unified vision-language pretraining with mixture-of-modality-experts. arXiv preprint arXiv:2111.02358, 2021. 7, 8
266
+ [8] Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, and Sağnak Taşırlar. Introducing our multimodal models, 2023. 8
267
+ [9] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024. 1, 8
268
+
269
+ [10] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3, 4
270
+ [11] Minwoo Byeon, Beomhee Park, Haecheon Kim, Sungjun Lee, Woonhyuk Baek, and Saehoon Kim. Coyo-700m: Image-text pair dataset. https://github.com/kakaobrain/coyo-dataset, 2022.3, 13
271
+ [12] Junyi Chen, Longteng Guo, Jia Sun, Shuai Shao, Zehuan Yuan, Liang Lin, and Dongyu Zhang. Eve: Efficient vision-language pre-training with masked prediction and modality-aware moe. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 1110-1119, 2024. 8
272
+ [13] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 8
273
+ [14] Xingyi Cheng, Bo Chen, Pan Li, Jing Gong, Jie Tang, and Le Song. Training compute-optimal protein language models. bioRxiv, 2024. 8
274
+ [15] Aidan Clark, Diego de Las Casas, Aurelia Guy, Arthur Mensch, Michela Paganini, Jordan Hoffmann, Bogdan Damoc, Blake Hechtman, Trevor Cai, Sebastian Borgeaud, et al. Unified scaling laws for routed language models. In International conference on machine learning, pages 4057-4086. PMLR, 2022. 8
275
+ [16] Wenliang Dai, Nayeon Lee, Boxin Wang, Zhuolin Yang, Zihan Liu, Jon Barker, Tuomas Rintamaki, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. NVLM: Open frontier-class multimodal LLMs. arXiv preprint arXiv:2409.11402, 2024. 8
276
+ [17] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdulmohsin, et al. Scaling vision transformers to 22 billion parameters. In International Conference on Machine Learning, pages 7480-7512. PMLR, 2023. 3
277
+ [18] Haiwen Diao, Yufeng Cui, Xiaotong Li, Yueze Wang, Huchuan Lu, and Xinlong Wang. Unveiling encoder-free vision-language models. arXiv preprint arXiv:2406.11832, 2024.8
278
+ [19] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 1
279
+ [20] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12873-12883, 2021. 8
280
+ [21] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. arXiv preprint arXiv:2309.17425, 2023. 3, 6, 13
281
+
282
+ [22] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research, 23(120):1-39, 2022. 8
283
+ [23] Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Béthune, Zhe Gan, Alexander T Toshev, Marcin Eichner, Moin Nabi, Yinfei Yang, Joshua M. Susskind, and Alaaeldin El-Nouby. Multimodal autoregressive pre-training of large vision encoders, 2024. 1, 8
284
+ [24] Trevor Gale, Deepak Narayanan, Cliff Young, and Matei Zaharia. Megablocks: Efficient sparse training with mixture-of-experts. Proceedings of Machine Learning and Systems, 5:288-304, 2023. 6
285
+ [25] Alexander Hagele, Elie Bakouch, Atli Kosson, Loubna Ben Allal, Leandro Von Werra, and Martin Jaggi. Scaling laws and compute-optimal training beyond fixed training durations. arXiv preprint arXiv:2405.18392, 2024. 3
286
+ [26] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. In Proceedings of the 36th International Conference on Neural Information Processing Systems, pages 30016-30030, 2022. 2, 3, 4, 8, 17
287
+ [27] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Barun Patra, et al. Language is not all you need: Aligning perception with language models. Advances in Neural Information Processing Systems, 36:72096-72109, 2023. 8
288
+ [28] Peter J. Huber. Robust Estimation of a Location Parameter, pages 492-518. Springer New York, New York, NY, 1992. 3
289
+ [29] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 7
290
+ [30] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. 8
291
+ [31] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 2, 8, 15
292
+ [32] Jing Yu Koh, Ruslan Salakhutdinov, and Daniel Fried. Grounding language models to images for multimodal inputs and outputs. In International Conference on Machine Learning, pages 17283-17300. PMLR, 2023. 7
293
+ [33] Jakub Krajewski, Jan Ludziejewski, Kamil Adamczewski, Maciej Pioro, Michal Krutul, Szymon Antoniak, Kamil Ciebiera, Krystian Król, Tomasz Odrzygoźdź, Piotr Sankowski, et al. Scaling laws for fine-grained mixture of experts. arXiv preprint arXiv:2402.07871, 2024. 8, 18
294
+
295
+ [34] Hugo Laurençon, Lucile Saulnier, Léo Tronchon, Stas Bekman, Amanpreet Singh, Anton Lozhkov, Thomas Wang, Siddharth Karamcheti, Alexander Rush, Douwe Kiela, et al. Obelics: An open web-scale filtered dataset of interleaved image-text documents. Advances in Neural Information Processing Systems, 36, 2024. 3, 5, 8, 13
296
+ [35] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models? arXiv preprint arXiv:2405.02246, 2024. 1, 8
297
+ [36] Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan Firat, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. Gshard: Scaling giant models with conditional computation and automatic sharding. arXiv preprint arXiv:2006.16668, 2020. 8
298
+ [37] Mike Lewis, Shruti Bhosale, Tim Dettmers, Naman Goyal, and Luke Zettlemoyer. Base layers: Simplifying training of large, sparse models. In International Conference on Machine Learning, pages 6265-6274. PMLR, 2021. 8
299
+ [38] Dongxu Li, Yudong Liu, Haoning Wu, Yue Wang, Zhiqi Shen, Bowen Qu, Xinyao Niu, Guoyin Wang, Bei Chen, and Junnan Li. Aria: An open multimodal native mixture-of-experts model. arXiv preprint arXiv:2410.05993, 2024. 8
300
+ [39] Jeffrey Li, Alex Fang, Georgios Smyrnis, Maor Ivgi, Matt Jordan, Samir Gadre, Hritik Bansal, Etash Guha, Sedrick Keh, Kushal Arora, et al. Datacomp-lm: In search of the next generation of training sets for language models. arXiv preprint arXiv:2406.11794, 2024. 3, 13, 15
301
+ [40] Bin Lin, Zhenyu Tang, Yang Ye, Jiaxi Cui, Bin Zhu, Peng Jin, Junwu Zhang, Munan Ning, and Li Yuan. Moe-llava: Mixture of experts for large vision-language models. arXiv preprint arXiv:2401.15947, 2024. 8
302
+ [41] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. 5, 8
303
+ [42] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 8
304
+ [43] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 1, 7, 8
305
+ [44] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 13
306
+ [45] Jiasen Lu, Christopher Clark, Rowan Zellers, Roozbeh Mottaghi, and Aniruddha Kembhavi. Unified-io: A unified model for vision, language, and multi-modal tasks. In The Eleventh International Conference on Learning Representations, 2022. 7
307
+ [46] Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Anton Belyi, et al. Mm1: Methods, analysis and insights from multimodal llm pre-training. In European Conference on Computer Vision, pages 304–323. Springer, 2025. 5, 8, 13
308
+
309
+ [47] Jack Merullo, Louis Castricato, Carsten Eickhoff, and Ellie Pavlick. Linearly mapping from image to text space. In The Eleventh International Conference on Learning Representations, 2023. 7
310
+ [48] David Mizrahi, Roman Bachmann, Oguzhan Kar, Teresa Yeo, Mingfei Gao, Afshin Dehghan, and Amir Zamir. 4m: Massively multimodal masked modeling. Advances in Neural Information Processing Systems, 36:58363-58408, 2023. 7
311
+ [49] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Tushar Nagarajan, Matt Smith, Shashank Jain, Chun-Fu Yeh, Prakash Murugesan, Peyman Heidari, Yue Liu, et al. Anymal: An efficient and scalable any-modality augmented language model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1314-1332, 2024. 8
312
+ [50] Basil Mustafa, Carlos Riquelme, Joan Puigcerver, Rodolphe Jenatton, and Neil Houlsby. Multimodal contrastive learning with limoe: the language-image mixture of experts. Advances in Neural Information Processing Systems, 35:9564-9576, 2022. 8
313
+ [51] Jorge Nocedal. Updating quasi-Newton matrices with limited storage. Mathematics of Computation, 35(151):951-958, 1980. 3
314
+ [52] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 1
315
+ [53] Tim Pearce and Jinyeop Song. Reconciling kaplan and chinchilla scaling laws. arXiv preprint arXiv:2406.12907, 2024. 15
316
+ [54] Tim Pearce, Tabish Rashid, Dave Bignell, Raluca Georgescu, Sam Devlin, and Katja Hofmann. Scaling laws for pre-training agents and world models. arXiv preprint arXiv:2411.04434, 2024. 8
317
+ [55] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 1, 3, 15
318
+ [56] Jathushan Rajasegaran, Ilija Radosavovic, Rahul Ravishankar, Yossi Gandelsman, Christoph Feichtenhofer, and Jitendra Malik. An empirical study of autoregressive pretraining from videos. arXiv preprint arXiv:2501.05453, 2025. 8
319
+ [57] Kanchana Ranasinghe, Brandon McKinzie, Sachin Ravi, Yinfei Yang, Alexander Toshev, and Jonathon Shlens. Perceptual grouping in contrastive vision-language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5571-5584, 2023. 13
320
+ [58] Noam Shazeer. Glu variants improve transformer. arXiv preprint arXiv:2002.05202, 2020. 3
321
+ [59] Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538, 2017. 2, 6, 8
322
+
323
+
324
+ [60] Sheng Shen, Zhewei Yao, Chunyuan Li, Trevor Darrell, Kurt Keutzer, and Yuxiong He. Scaling vision-language models with sparse mixture of experts. In The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. 8
325
+ [61] Mustafa Shukor and Matthieu Cord. Implicit multimodal alignment: On the generalization of frozen llms to multimodal inputs. Advances in Neural Information Processing Systems, 37:130848-130886, 2024. 7
326
+ [62] Mustafa Shukor, Corentin Dancette, and Matthieu Cord. ep-alm: Efficient perceptual augmentation of language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22056-22069, 2023. 1, 7
327
+ [63] Mustafa Shukor, Corentin Dancette, Alexandre Rame, and Matthieu Cord. Unival: Unified model for image, video, audio and language tasks. Transactions on Machine Learning Research Journal, 2023. 7
328
+ [64] Mustafa Shukor, Dana Aubakirova, Francesco Capuano, Pepijn Kooijmans, Steven Palma, Adil Zoutine, Michel Aractingi, Caroline Pascal, Martino Russi, Andres Marafioti, et al. Smolvla: A vision-language-action model for affordable and efficient robotics. arXiv preprint arXiv:2506.01844, 2025. 1, 8
329
+ [65] Xingwu Sun, Yanfeng Chen, Yiqing Huang, Ruobing Xie, Jiaqi Zhu, Kai Zhang, Shuaipeng Li, Zhen Yang, Jonny Han, Xiaobo Shu, et al. Hunyuan-large: An open-source moe model with 52 billion activated parameters by tencent. arXiv preprint arXiv:2411.02265, 2024. 8
330
+ [66] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 8
331
+ [67] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 7, 8
332
+ [68] Maria Tsimpoukelli, Jacob L Menick, Serkan Cabi, SM Eslami, Oriol Vinyals, and Felix Hill. Multimodal few-shot learning with frozen language models. Advances in Neural Information Processing Systems, 34:200-212, 2021. 7
333
+ [69] Théophane Vallaeys, Mustafa Shukor, Matthieu Cord, and Jakob Verbeek. Improved baselines for data-efficient perceptual augmentation of llms. arXiv preprint arXiv:2403.13499, 2024. 7
334
+ [70] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2017. 8
335
+ [71] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. 3
336
+ [72] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International conference on machine learning, pages 23318-23340. PMLR, 2022. 7
337
+
338
+ [73] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 1, 8
339
+ [74] Siqi Wang, Zhengyu Chen, Bei Li, Keqing He, Min Zhang, and Jingang Wang. Scaling laws across model architectures: A comparative analysis of dense and MoE models in large language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 5583-5595, Miami, Florida, USA, 2024. Association for Computational Linguistics. 8, 18
340
+ [75] Wenhui Wang, Hangbo Bao, Li Dong, Johan Bjorck, Zhiliang Peng, Qiang Liu, Kriti Aggarwal, Owais Khan Mohammed, Saksham Singhal, Subhojit Som, et al. Image as a foreign language: Beit pretraining for all vision and vision-language tasks. arXiv preprint arXiv:2208.10442, 2022. 7
341
+ [76] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 8
342
+ [77] Tianwen Wei, Bo Zhu, Liang Zhao, Cheng Cheng, Biye Li, Weiwei Lu, Peng Cheng, Jianhao Zhang, Xiaoyu Zhang, Liang Zeng, et al. Skywork-moe: A deep dive into training techniques for mixture-of-experts language models. arXiv preprint arXiv:2406.06563, 2024. 8
343
+ [78] Le Xue, Manli Shu, Anas Awadalla, Jun Wang, An Yan, Senthil Purushwalkam, Honglu Zhou, Viraj Prabhu, Yutong Dai, Michael S Ryoo, et al. xgen-mm (blip-3): A family of open large multimodal models. arXiv preprint arXiv:2408.08872, 2024. 1, 8
344
+ [79] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 8
345
+ [80] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 1
346
+ [81] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1.5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 5, 8
347
+ [82] Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023. 3
348
+ [83] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. In The Twelfth International Conference on Learning Representations, 2024. 8
349
+ [84] Barret Zoph, Irwan Bello, Sameer Kumar, Nan Du, Yanping Huang, Jeff Dean, Noam Shazeer, and William Fedus. St-moe: Designing stable and transferable sparse expert models. arXiv preprint arXiv:2202.08906, 2022. 8
350
+
351
+ # Scaling Laws for Native Multimodal Models Supplementary Material
352
+
353
+ This supplementary material is organized as follows:
354
+
355
+ - Appendix A: contains the implementation details and the hyperparameters used to train our models.
356
+ - Appendix B: contains detailed comparison between early and late fusion models.
357
+ - Appendix C: contains more details about scaling laws derivation, evaluation and additional results.
358
+ - Appendix D: contains discussion about the paper limitations.
359
+ - Appendix E: contains more results about MoEs and modality specialization.
360
+
361
+ # A. Experimental setup
362
+
363
+ In Table 6, we show the pre-training hyperparameters for the different model configurations used to derive the scaling laws. The number of parameters ranges from 275M to 3.7B, with model width increasing accordingly, while the depth remains fixed at 24 layers. Learning rates vary by model size, decreasing as the model scales up. Based on empirical experiments and estimates similar to [46], we found these values to be effective in our setup. Training is optimized using a fully decoupled AdamW optimizer with momentum values $\beta_{1} = 0.9$, $\beta_{2} = 0.95$, and a weight decay of $1\mathrm{e}{-4}$. The batch size is set to 2k samples, which accounts for 2M tokens given a 1k context length. Gradient clipping is set to 1.0, with a maximum warmup duration of 5k iterations, adjusted for shorter training runs: 1k and 2.5k warmup steps for models trained between 1k-4k and 5k-15k steps, respectively. For MoEs, we found that a longer warmup is significantly better, so we adopt a 2.5k warmup for all runs under 20k steps. We use a constant learning rate schedule with a cooldown during the final $20\%$ of training, gradually reducing the learning rate to zero following an inverse square root schedule. For vision processing, image inputs are divided into (14, 14) patches, with augmentations including Random Resized Crop (resizing images to 224px with a scale range of [0.4, 1.0]) and Random Horizontal Flip with a probability of 0.5. We train our models on a mixture of interleaved, image-caption, and text-only data (Table 5). For late-fusion models, we found that using a smaller learning rate for the vision encoder significantly boosts performance (Table 8), and when both the encoder and decoder are initialized from pre-trained models (Appendix B.7), freezing the vision encoder works best (Table 7).
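To make the schedule above concrete, here is a minimal sketch of the learning-rate rule (linear warmup, a constant phase, then a cooldown to zero over the final 20% of steps). The function name and the exact shape of the final decay (a $1-\sqrt{\text{progress}}$ cooldown) are illustrative assumptions, not the training code used for the paper.

```python
import math

def lr_at_step(step, base_lr, total_steps, warmup_steps=5000, cooldown_frac=0.2):
    """Sketch of the schedule in Appendix A: linear warmup, constant phase,
    then a decay to zero over the final `cooldown_frac` of training.
    The 1 - sqrt(progress) cooldown shape is an illustrative assumption."""
    cooldown_start = int(total_steps * (1.0 - cooldown_frac))
    if step < warmup_steps:
        return base_lr * step / max(warmup_steps, 1)      # linear warmup
    if step < cooldown_start:
        return base_lr                                    # constant phase
    progress = (step - cooldown_start) / max(total_steps - cooldown_start, 1)
    return base_lr * max(0.0, 1.0 - math.sqrt(progress))  # cooldown to zero
```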
364
+
365
+ <table><tr><td>Data type</td><td>dataset</td><td>#samples</td><td>sampling prob.</td></tr><tr><td></td><td>DFN [21]</td><td>2B</td><td>27%</td></tr><tr><td>Image-Caption</td><td>COYO [11]</td><td>600M</td><td>11.25%</td></tr><tr><td></td><td>HQITP[57]</td><td>400M</td><td>6.75%</td></tr><tr><td>Interleaved</td><td>Obelics [34]</td><td>141M Docs</td><td>45%</td></tr><tr><td>Text</td><td>DCLM [39]</td><td>6.6T Toks</td><td>10%</td></tr></table>
366
+
367
+ Table 5. Pre-training data mixture. Unless otherwise specified, the training mixture contains $45\%$, $45\%$, and $10\%$ of image-caption, interleaved-document, and text-only data, respectively.
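As an illustration of how this mixture is realized at the example level, the sketch below draws a data source per training example according to the sampling probabilities in Table 5. The sampler itself (and the use of `numpy` for it) is an assumption, not the actual data pipeline.

```python
import numpy as np

# Sampling probabilities from Table 5 (45% captions, 45% interleaved, 10% text).
SOURCES = ["DFN", "COYO", "HQITP", "Obelics", "DCLM"]
PROBS = [0.27, 0.1125, 0.0675, 0.45, 0.10]

def sample_sources(num_examples, seed=0):
    """Draw one data source per training example according to the mixture weights."""
    rng = np.random.default_rng(seed)
    return rng.choice(SOURCES, size=num_examples, p=PROBS)
```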
368
+
369
+ <table><tr><td colspan="7">Early-fusion</td></tr><tr><td>Params</td><td>275M</td><td>468M</td><td>932M</td><td>1.63B</td><td>2.28B</td><td>3.35B</td></tr><tr><td>width</td><td>800</td><td>1088</td><td>1632</td><td>2208</td><td>2624</td><td>3232</td></tr><tr><td>depth</td><td></td><td></td><td>24</td><td></td><td></td><td></td></tr><tr><td>Learning rate</td><td>1.5e-3</td><td>1.5e-3</td><td>5e-4</td><td>4.2e-4</td><td>4e-4</td><td>3.5e-4</td></tr><tr><td colspan="7">Late-fusion</td></tr><tr><td>Params</td><td>289M</td><td>494M</td><td>1B</td><td>1.75B</td><td>2.43B</td><td>3.7B</td></tr><tr><td>vision encoder width</td><td>384</td><td>512</td><td>768</td><td>1024</td><td>1184</td><td>1536</td></tr><tr><td>vision encoder depth</td><td></td><td></td><td>24</td><td></td><td></td><td></td></tr><tr><td>width</td><td>768</td><td>1024</td><td>1536</td><td>2048</td><td>2464</td><td>3072</td></tr><tr><td>depth</td><td></td><td></td><td>24</td><td></td><td></td><td></td></tr><tr><td>Learning rate</td><td>1.5e-3</td><td>1.5e-3</td><td>5e-4</td><td>4.2e-4</td><td>3.8e-4</td><td>3.3e-4</td></tr><tr><td colspan="7">Early-fusion MoEs</td></tr><tr><td>Active Params</td><td>275M</td><td>468M</td><td>932M</td><td>1.63B</td><td>2.28B</td><td>3.35B</td></tr><tr><td>width</td><td>800</td><td>1088</td><td>1632</td><td>2208</td><td>2624</td><td>3232</td></tr><tr><td>depth</td><td></td><td></td><td>24</td><td></td><td></td><td></td></tr><tr><td>Learning rate</td><td>1.5e-3</td><td>1.5e-3</td><td>5 e-4</td><td>4.2e-4</td><td>4e-4</td><td>3.5e-4</td></tr><tr><td>Training tokens</td><td></td><td></td><td>2.5B-600B</td><td></td><td></td><td></td></tr><tr><td>Optimizer</td><td></td><td></td><td>Fully decoupled AdamW [44]</td><td></td><td></td><td></td></tr><tr><td>Optimizer Momentum</td><td></td><td></td><td>β1=0.9, β2=0.95</td><td></td><td></td><td></td></tr><tr><td>Minimum Learning rate</td><td></td><td></td><td>0</td><td></td><td></td><td></td></tr><tr><td>Weight decay</td><td></td><td></td><td>1e-4</td><td></td><td></td><td></td></tr><tr><td>Batch size</td><td></td><td></td><td>2k</td><td></td><td></td><td></td></tr><tr><td>Patch size</td><td></td><td></td><td>(14, 14)</td><td></td><td></td><td></td></tr><tr><td>Gradient clipping</td><td></td><td></td><td>1.0</td><td></td><td></td><td></td></tr><tr><td>MAximum Warmup iterations</td><td></td><td></td><td>5k</td><td></td><td></td><td></td></tr><tr><td>Augmentations:
370
+ RandomResizedCrop
371
+ size</td><td></td><td></td><td>224px</td><td></td><td></td><td></td></tr><tr><td>scale</td><td></td><td></td><td>[0.4, 1.0]</td><td></td><td></td><td></td></tr><tr><td>RandomHorizontalFlip</td><td></td><td></td><td>p=0.5</td><td></td><td></td><td></td></tr></table>
372
+
373
+ Table 6. Pre-training hyperparameters. We detail the hyperparameters used for pre-training the different model configurations to derive the scaling laws.
374
+
375
+ <table><tr><td>Vision encoder
376
+ lr scaler</td><td>Interleaved
377
+ (CE)</td><td>Image-Caption
378
+ (CE)</td><td>Text
379
+ (CE)</td><td>AVG
380
+ (CE)</td><td>AVG (SFT)
381
+ (Acc)</td></tr><tr><td>1</td><td>2.521</td><td>2.15</td><td>2.867</td><td>2.513</td><td>43.49</td></tr><tr><td>0.1</td><td>2.502</td><td>2.066</td><td>2.862</td><td>2.477</td><td>52.27</td></tr><tr><td>0.01</td><td>2.502</td><td>2.066</td><td>2.859</td><td>2.476</td><td>53.76</td></tr><tr><td>0.001</td><td>2.513</td><td>2.066</td><td>2.857</td><td>2.479</td><td>-</td></tr><tr><td>0 (frozen)</td><td>2.504</td><td>2.061</td><td>2.856</td><td>2.474</td><td>54.14</td></tr></table>
382
+
383
+ Table 7. Vision encoder lr scaler. Freezing the vision encoder works best when initializing late-fusion models with pre-trained models.
384
+
385
+ ![](images/e1045b0eee72b7e00af10c12fc407f2a2b374da404a7ef29ecfb2de71a4c8ab8.jpg)
386
+
387
+ ![](images/48ddff1697fd13cb0e2777d4878ca97b8b1c84373b2fd14b4c89423c97881f5b.jpg)
388
+
389
+ ![](images/768487d1cb2c067b0f6e62f53328b62a8037bbd733461c754d3ae55f1b154cd2.jpg)
390
+
391
+ ![](images/1249bd8d2556f58d1f41ad80f0ef011fad2d11b1c4b68a40881aad72f6f4d8f5.jpg)
392
+ Figure 14. Early vs late fusion: scaling training FLOPs. We compare early and late fusion models when scaling both the model size and the number of training tokens. The gap decreases mainly due to scaling model size.
393
+
394
+ ![](images/94e560b7e79f3313be19f00c8abc755758f179ac42e21d3c6cdac2d3d494893e.jpg)
395
+ Figure 15. Early vs late fusion: changing the training mixture. We vary the training mixtures and plot the final training loss. Early-fusion models become better as the proportion of interleaved documents increases. Early and late fusion have 1.63B and 1.75B parameters, respectively.
396
+
397
+ ![](images/35b33667f30401245922f03ec33eabdee73856313eeb1047a34b45938190086e.jpg)
398
+
399
+ ![](images/81e7cde88d0f03bd3d1762a769bf80c9cf66016126c5cafb8103a4885b3eefb2.jpg)
400
+
401
+ <table><tr><td>Vision encoder lr scaler</td><td>Interleaved (CE)</td><td>Image-Caption (CE)</td><td>Text (CE)</td><td>AVG (CE)</td><td>AVG (SFT) (Acc)</td></tr><tr><td>0.1</td><td>2.674</td><td>2.219</td><td>3.072</td><td>2.655</td><td>34.84</td></tr><tr><td>0.01</td><td>2.672</td><td>2.197</td><td>3.071</td><td>2.647</td><td>38.77</td></tr><tr><td>0.001</td><td>2.674</td><td>2.218</td><td>3.073</td><td>2.655</td><td>38.46</td></tr></table>
402
+
403
+ Table 8. Vision encoder lr scaler. Reducing the learning rate for the vision encoder is better when training late-fusion models from scratch.
404
+
405
+ # B. Late vs early fusion
406
+
407
+ This section provides additional comparison between early and late fusion models.
408
+
409
+ # B.1. Scaling FLOPs
410
+
411
+ Figure 14 compares early-fusion and late-fusion models when scaling FLOPs. Specifically, for each model size, we train multiple models using different amounts of training tokens. The performance gap between the two approaches decreases mainly due to increasing model size rather than increasing the number of training tokens. Despite the decreasing gap, early fusion consistently outperforms late fusion across all the models that we train.
412
+
413
+ # B.2. Changing the training data mixture
414
+
415
+ We analyze how the performance gap between early and late fusion models changes with variations in the training data mixture. As shown in Figure 16 and Figure 15, when fixing the model size, increasing the ratio of text and interleaved data favors early fusion. Interestingly, the gap remains largely unchanged for other data types. We also observe interference effects between different data types. Specifically, increasing the amount of interleaved data negatively impacts performance on image captions and vice versa. Additionally, increasing the proportion of text-only data slightly improves interleaved performance but increases loss on image captions. Overall, we find that text-only and interleaved data are correlated across different setups.
416
+
417
+ # B.3. Scaling image resolution is in favor of early-fusion
418
+
419
+ We examine how both architectures perform with varying image resolution. We fix the number of model parameters to 1.63B and 1.75B for early and late fusion respectively. All models are trained for 100K steps or 200B tokens. Since
420
+
421
+ ![](images/5d510b88dd42b8ddd6e4d6b000a38f3524b9b76ce933ceb6ce23cf3dbff52932.jpg)
422
+ Figure 16. Early vs late fusion: changing the amount of text-only data in the training mixture (isoFLOPs). We vary the ratio of text-only data and plot the final training loss. The gap increases with the text data ratio in favor of the early-fusion model. Early fusion has 1.63B parameters and late fusion 1.75B parameters.
423
+
424
+ ![](images/4940c0e3eb2f8c85622a677039121632355bd85038b5913c73fa6ec971513244.jpg)
425
+
426
+ ![](images/017cd4e44d6041fbf5d92d6449f51dc545d6432fffd337fa70b94d0dc159c6e0.jpg)
427
+
428
+ ![](images/3158294d4bed882c5795c15d51f30187e0425323247a897fc1f73162cbb2ca5e.jpg)
429
+ Figure 17. Early vs late fusion: training with different image resolutions (isoFLOPs). For the same training FLOPs, we vary the image resolution (and thus the number of image tokens) during training and report the final training loss. Increasing resolution hurts the performance on text and interleaved documents, while helping image captioning. The gap stays almost the same on text and interleaved data while slightly increasing on image captioning in favor of early fusion.
430
+
431
+ ![](images/9be0f1f4378cc87d81924a109805bfd368af57358e8a69b7dd0cb84c5d2d293b.jpg)
432
+
433
+ the patch size remains constant, increasing the resolution results in a higher number of visual tokens. For all resolutions, we maintain the same number of text tokens. As shown in Figure 17, the early-fusion model consistently outperforms the late-fusion model across resolutions, particularly for multimodal data, with the performance gap widening at higher resolutions. Additionally, we observe that the loss on text and interleaved data increases as resolution increases.
434
+
435
+ # B.4. Early-fusion is consistently better when matching the late-fusion model size
436
+
437
+ In this section, we compare the late-fusion model with different configurations of the early-fusion one. Specifically, we train early-fusion models that match the late-fusion model in total parameters (Params), text model size (Text), and FLOPs (FLOPs), assuming a 45-45-10 training mixture. As shown in Figure 18, early fusion consistently outperforms late fusion when normalized by total parameters, followed
438
+
439
+ by normalization by FLOPs. When matching the text model size, early fusion performs better at higher ratios of interleaved data.
440
+
441
+ # B.5. Different late-fusion configuration
442
+
443
+ We examine how this scaling changes with different late-fusion configurations. Instead of scaling both the vision and text models equally, as done in the main paper, we fix the vision encoder size to 300M and scale only the text model. Figure 19 shows that late-fusion models lag behind at smaller model sizes, with the gap closing significantly as the text model scales. This suggests that allocating more parameters to shared components is more beneficial, further supporting the choice of early-fusion models.
444
+
445
+ # B.6. Different context lengths
446
+
447
+ In the paper, we use a 1k context length following [31]. Also following this paper, we ignore the context-length effect, as the model dimension dominates the training compute estimate. Moreover, [53] empirically found that scaling coefficients are robust to context length. Nevertheless, our initial experiments (Figure 20) indicate that scaling the context length does not significantly affect the comparison between late and early fusion.
448
+
449
+ # B.7. Initializing from LLM and CLIP
450
+
451
+ We study the case where both late and early fusion models are initialized from pre-trained models, specifically DCLM-1B [39] and CLIP-ViT-L [55] for late fusion. Interestingly, Figure 21 shows that for text and interleaved multimodal documents, early fusion can match the performance of late fusion when trained for longer. However, closing the gap on image caption data remains more challenging. Notably, when considering the overall training cost, including that of pre-trained models, early fusion requires significantly longer training to compensate for the vision encoder's pretraining cost.
452
+
453
+ ![](images/bef8ca3878711b6a138e83a9bfee1c56c489545a02ac647bb89e7d1bbb3a12e6.jpg)
454
+
455
+ ![](images/f92c5996032cc044a4a3f0ed2777722f2dbfee317e184cb6745575c5c7f4f5ec.jpg)
456
+
457
+ ![](images/8b99abece7cc6b38f661c87c0bc8d05b6d13d113129c2d0fb0fa3e7c2fcb5155.jpg)
458
+
459
+ ![](images/1f84b67b5d7cfa4cb69e5df7625489543ef35e1179c8f78db4afa37ad99ee036.jpg)
460
+ Figure 18. Early vs late fusion: changing the training mixture and early-fusion configuration. We vary the training mixtures and plot the final training loss for different configurations of early-fusion models. For the same number of total parameters, early fusion consistently outperforms late fusion.
461
+
462
+ ![](images/b0edcfb8489751a58c88dfeb30b60fd61492d6bfaf25d9ea5df642aa01489397.jpg)
463
+
464
+ ![](images/de31258d30cda455adcab7e3e5a0b3e5d9d53d61358d4cde8f2e527f429a9010.jpg)
465
+
466
+ ![](images/4dde252ff0946067af23011ac2b4db6aa4011b6c34fbfbc6549057a599da712d.jpg)
467
+
468
+ ![](images/9dbd382a1fce940ad7eb236a4d73fcae11b11afb2586a2dd0f48ae744b045239.jpg)
469
+ Figure 19. Early vs late fusion: scaling training FLOPs while fixing the vision encoder size. We compare early and late fusion models when scaling both the amount of training tokens and model sizes. For late-fusion models, we fix the vision encoder size (300M) and scale the text model (250M, 834M, 2B, 3B). The gap between early and late fusion gets tighter when scaling the text model.
470
+
471
+ ![](images/10dbbf3ec8073a52df28e88684bc9882d3a1ce36b6a9552bc7fb858ad7c9543e.jpg)
472
+ Figure 20. Early vs late fusion with different context lengths.
473
+
474
+ ![](images/af197f1feb50d444ba04fef9c57e806bc731f5d9db518edfb3c47371741f6600.jpg)
475
+ Figure 21. Early vs late fusion when initializing the encoder and decoder. Early-fusion can match the performance of late-fusion models when trained for longer. However, the gap is bigger on image-caption data.
476
+
477
+ # C. Scaling laws
478
+
479
+ # C.1. Fitting $L = F(N,D)$
480
+
481
+ Following [26], we determine the parameters that minimize the following objective across all our runs $i$ :
482
+
483
+ $$
484
+ \min_{a, b, e, \alpha, \beta} \sum_{i} \operatorname{Huber}_{\delta}\left(\operatorname{LSE}\left(a - \alpha \log N_{i},\, b - \beta \log D_{i},\, e\right) - \log L_{i}\right), \tag{2}
485
+ $$
486
+
487
+ We perform this optimization across various initialization ranges and select the parameters that achieve the lowest loss across all initializations. Specifically, our grid search spans $\{0, 0.5, 2.5\}$ for $\alpha$ and $\beta$, $\{0, 5, 10, \dots, 30\}$ for $a$ and $b$, and $\{-1, -0.5, 0.5, 1\}$ for $e$. We use the L-BFGS algorithm with $\delta = 10^{-3}$.
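A minimal sketch of this fitting procedure is given below, assuming the runs are summarized by arrays of model sizes, token counts, and final losses. The helper name `fit_scaling_law`, the reduced initialization grid, and the use of `scipy.optimize.minimize` are our assumptions; the paper sweeps the larger grid described above.

```python
import itertools
import numpy as np
from scipy.optimize import minimize

def fit_scaling_law(N, D, L, delta=1e-3):
    """Fit log L ~ LSE(a - alpha*log N, b - beta*log D, e) with a Huber loss (Eq. 2).
    N, D, L: arrays of model sizes, training tokens, and final losses."""
    logN, logD, logL = np.log(N), np.log(D), np.log(L)

    def objective(params):
        a, b, e, alpha, beta = params
        # log-sum-exp of the three terms of the Chinchilla-style loss model
        lse = np.logaddexp(np.logaddexp(a - alpha * logN, b - beta * logD), e)
        r = lse - logL
        # Huber loss with threshold delta
        huber = np.where(np.abs(r) <= delta, 0.5 * r**2, delta * (np.abs(r) - 0.5 * delta))
        return huber.sum()

    best = None
    # Reduced illustrative grid of initializations (the paper uses a denser one).
    for a0, b0, e0, al0, be0 in itertools.product([0, 10, 20], [0, 10, 20],
                                                  [-0.5, 0.5], [0, 0.5, 2.5], [0, 0.5, 2.5]):
        res = minimize(objective, x0=[a0, b0, e0, al0, be0], method="L-BFGS-B")
        if best is None or res.fun < best.fun:
            best = res
    a, b, e, alpha, beta = best.x
    return dict(E=np.exp(e), A=np.exp(a), B=np.exp(b), alpha=alpha, beta=beta)
```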
488
+
489
+ # C.2. Fitting $N \propto C^{a}, D \propto C^{b}, D \propto N^{d}$
490
+
491
+ While these equations have a closed-form solution [26] for early-fusion models that can be derived from Eq 1, this is not the case for late-fusion models without specifying either the vision encoder or text model size. To ensure a fair comparison, we derive these equations for both models by performing linear regression in log space. We found that the regression is very close to the coefficients found with the closed-form derivation (Table 9). For instance, to derive $N = K_{a}C^{a}$, given a FLOP budget $C$ and a set of linearly spaced token counts $D_{i}$ ranging from 10B to 600B, we compute the model size for each $D_{i}$ as $N_{i} = \frac{C}{6D_{i}}$ for early fusion and $N_{i} = \frac{C}{6D_{i}} + 0.483\, N_{v}$ for late fusion (for the 45-45-10 mixture, $D_{v} = 0.544D$, thus $C = 6D(0.544N_{v} + N_{t})$). We then apply Eq 1 to obtain the loss for each model size and select the $N$ that has the minimum loss. We repeat this for all FLOP values corresponding to our runs, resulting in a set of points $(C, N_{opt})$ that we use to regress $a$ and $K_{a}$. We follow a similar procedure to find $b$ and $d$. For late-fusion models, we regress a linear model to determine $N_{v}$ given $N$. Notably, even though we maintain a fixed width ratio for late-fusion models, this approach is more accurate, as embedding layers prevent a strictly fixed ratio between the text and vision model sizes. We present the regression results in Figure 22.
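A minimal sketch of the log-space regression step, assuming the $(C, N_{opt})$ pairs have already been constructed as described above (the function name is hypothetical):

```python
import numpy as np

def fit_power_law(C, N_opt):
    """Fit N_opt ~ K * C**a by linear regression in log space.
    C: compute budgets; N_opt: compute-optimal model sizes."""
    a, log_k = np.polyfit(np.log(C), np.log(N_opt), deg=1)
    return a, np.exp(log_k)  # exponent a and multiplier K_a
```

The same helper can be reused to regress $b$ (tokens vs compute) and $d$ (tokens vs model size) from the corresponding point sets.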
492
+
493
+ <table><tr><td>Model</td><td>a</td><td>b</td><td>d</td><td>n</td><td>dn</td></tr><tr><td>Closed form</td><td>0.52649</td><td>0.47351</td><td>0.89938</td><td>1.11188</td><td>-0.05298</td></tr><tr><td>Regression</td><td>0.52391</td><td>0.47534</td><td>0.90052</td><td>1.10224</td><td>-0.04933</td></tr></table>
494
+
495
+ Table 9. Scaling laws parameters for early fusion. Regressing the scaling-law coefficients leads to results very close to those of the closed-form solution.
496
+
497
+ # C.3. Fitting $L \propto C^c$
498
+
499
+ To determine the relationship between the final model loss and the compute budget $C$ , we begin by interpolating the points corresponding to the same model size and compute
500
+
501
+ the convex hull that covers the minimum loss achieved by all runs for each FLOP. This results in a continuous mapping from the FLOPs to the lowest loss. We consider a range of FLOPs, excluding very small values $(\leq 3 \times 10^{19})$, and construct a dataset of $(C, L)$ for linearly spaced compute $C$. Using this data, we find the linear relationship between $L$ and $C$ in log space and deduce the exponent $c$. We visualize the results in Figure 26.
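A minimal sketch of this procedure, where a running minimum over compute stands in for the convex-hull envelope described above (an approximation we assume for illustration):

```python
import numpy as np

def fit_loss_compute_exponent(C, L, min_compute=3e19):
    """Fit L ~ K * C**c on the minimum-loss envelope over compute.
    C, L: per-run training FLOPs and final losses."""
    C, L = np.asarray(C, dtype=float), np.asarray(L, dtype=float)
    order = np.argsort(C)
    C_sorted, L_env = C[order], np.minimum.accumulate(L[order])  # lowest loss so far
    mask = C_sorted > min_compute                                # drop very small budgets
    c, log_k = np.polyfit(np.log(C_sorted[mask]), np.log(L_env[mask]), deg=1)
    return c, np.exp(log_k)
```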
502
+
503
+ ![](images/163a2ab479abcc17e9353749f796560b4c9b3868903e73a7bd4261351ac385ac.jpg)
504
+
505
+
506
+ ![](images/946d07662bb75c3af1e430bcb2e71945e0fc9b93f3c8ea8c090478e36a9ee523.jpg)
507
+
508
+
509
+ ![](images/8357b1f7cf079f4433ace33ae357a8cce5c5bbe3741aa00985d510c614ec3825.jpg)
510
+
511
+ ![](images/45331be5dffd8bf1906423e01879dee5d3a816ec501329e152690ebd1e24e59f.jpg)
512
+
513
+ ![](images/130ab8d89bfbb0daec936aeeb6ae15603aaee2a3b5a736c530f7a7e8d6b259fe.jpg)
514
+
515
+
516
+
517
+ ![](images/8eeca22c13a880c372f86b1bd6d50e828536005a351d735514d7e9130695399e.jpg)
518
+
519
+
520
+ Figure 22. Regression results of the scaling laws coefficients. Our estimation of the scaling coefficients is close to the closed-form solution.
521
+
522
+ # C.4. Scaling laws for different target data type
523
+
524
+ In Figure 27, we derive the scaling laws for different target data types. In general, we observe that the model learns image captioning faster than interleaved data, as indicated by the higher absolute value of the scaling exponent (e.g., 0.062 vs 0.046), despite using the same data ratio for captioning and interleaved data (45% each). Additionally, we find that the model learns more slowly on text-only data, likely due to the smaller amount of text-only data (10%). Across model configurations, we find that early fusion scales similarly to late fusion on image captioning but has a lower multiplicative constant (49.99 vs 47.97). For MoEs, the model learns faster but exhibits a higher multiplicative constant. On text and interleaved data, early and late fusion models scale similarly and achieve comparable
525
+
526
+ ![](images/05809573792c53ca70b02f70c994411ae668a3b5b2344a38b3d284b30400e43d.jpg)
527
+ Figure 23. Observed vs predicted loss. We visualize the loss predicted by our scaling laws (Eq 1) and the actual loss achieved by each run.
528
+
529
+ ![](images/eae4c7c1f75345ab401e7eb770895d809db3271eff8cce1157ab801bf3947c98.jpg)
530
+
531
+ performance. However, MoEs demonstrate better overall performance while learning slightly more slowly.
532
+
533
+ # C.5. Scaling laws for different training mixtures
534
+
535
+ We investigate how the scaling laws change when modifying the training mixtures. Specifically, we vary the ratio of image-caption, interleaved, and text-only data and report the results in Figure 28. Overall, we observe similar scaling trends, with only minor changes in the scaling coefficients. Upon closer analysis, we find that increasing the ratio of a particular data type in the training mixture leads to a corresponding increase in its scaling exponent. For instance, increasing the ratio of image captions from $30\%$ to $40\%$ raises the absolute value of the exponent from 0.056 to 0.061. However, for text-only data, we do not observe significant changes in the scaling coefficients when varying its proportion in the training mixture.
536
+
537
+ <table><tr><td>Data</td><td>MSE</td><td>R²</td><td>MAE (%)</td></tr><tr><td>Held-in</td><td>0.0029</td><td>0.9807</td><td>0.8608</td></tr><tr><td>Held-out</td><td>0.0004</td><td>0.9682</td><td>0.5530</td></tr></table>
538
+
539
+ Table 10. Scaling laws prediction errors. We report the mean squared error (MSE), R², and mean absolute error (MAE) of the loss prediction for held-in and held-out (8B model) data.
540
+
541
+ <table><tr><td>Model</td><td>E</td><td>α</td><td>β</td><td>a</td><td>b</td><td>d</td></tr><tr><td>Avg</td><td>1.80922</td><td>0.29842</td><td>0.33209</td><td>0.54302</td><td>0.48301</td><td>0.92375</td></tr><tr><td>Std</td><td>0.33811</td><td>0.10101</td><td>0.02892</td><td>0.08813</td><td>0.05787</td><td>0.23296</td></tr></table>
542
+
543
+ Table 11. Scaling laws sensitivity. We report the mean and standard deviation after bootstrapping with 100 iterations.
544
+
545
+ # C.6. Scaling laws evaluation
546
+
547
+ For each model size and number of training tokens, we compute the loss using the estimated functional form in Eq 1 and compare it to the actual loss observed in our runs. Figure 23, Figure 24, and Table 10 visualize these comparisons, showing that our estimation is highly accurate, particularly for lower loss values and larger FLOPs. We also assess our scaling laws in an extrapolation setting, predicting performance beyond the model sizes used for fitting. Notably, our approach estimates the performance of an 8B model with reasonable accuracy.
548
+
549
+ Additionally, we conduct a sensitivity analysis using bootstrapping. Specifically, we sample $P$ points with replacement ( $P$ being the total number of trained models) and re-estimate the scaling law coefficients. This process is repeated 100 times, and we report the mean and standard deviation of each coefficient. Table 11 shows that our estimation is more precise for $\beta$ than for $\alpha$ , primarily due to the smaller number of model sizes relative to the number of different token counts used to derive the scaling laws.
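A minimal sketch of this bootstrap, assuming a `fit_fn(N, D, L)` helper that returns the fitted coefficients as a dictionary (for example, a fit like the one sketched in Appendix C.1):

```python
import numpy as np

def bootstrap_coefficients(fit_fn, N, D, L, n_boot=100, seed=0):
    """Resample the P runs with replacement, re-fit the scaling law each time,
    and report the per-coefficient mean and standard deviation."""
    rng = np.random.default_rng(seed)
    N, D, L = map(np.asarray, (N, D, L))
    P = len(L)
    fits = []
    for _ in range(n_boot):
        idx = rng.integers(0, P, size=P)   # sample P points with replacement
        fits.append(fit_fn(N[idx], D[idx], L[idx]))
    return {k: (np.mean([f[k] for f in fits]), np.std([f[k] for f in fits]))
            for k in fits[0]}
```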
550
+
551
+ # C.7. Scaling laws for sparse NMMs
552
+
553
+ Similar to dense models, we fit a parametric loss function (Eq 1) to predict the loss of sparse NMMs based on the number of parameters and training tokens, replacing the total parameter count with the number of active parameters. While incorporating sparsity is standard when deriving scaling laws for MoEs [2, 33, 74], we focus on deriving scaling laws specific to the sparsity level used in our MoE setup. This yields coefficients that are implicitly conditioned on the sparsity configuration.
554
+
555
+ We also experiment with a sparsity-aware formulation of the scaling law as proposed in [2], and observe consistent
556
+
557
+ ![](images/a40ce810c6407918e90c23eeda423059a15b50810c13f19580f13fa38ea33ab6.jpg)
558
+ Figure 24. Observed vs predicted loss. We visualize the loss predicted by our scaling laws (Eq 1) and the actual loss achieved by each run. We can reliably predict the performance of models larger (8B params) than those used to fit the scaling laws.
559
+
560
+ trends (Table 12). In particular, the exponents associated with model size $(N)$ are substantially larger than those for training tokens $(\beta)$ , reinforcing the importance of scaling model size in sparse architectures. Additionally, we observe that the terms governing the scaling of active parameters decompose into two components.
561
+
562
+ # D. Discussion and Limitations
563
+
564
+ Scaling laws for multimodal data mixtures. Our scaling laws study spans different model configurations and training mixtures. While results suggest that the scaling law coefficients remain largely consistent across mixtures, a broader exploration of mixture variations is needed to validate this observation and establish a unified scaling law that accounts for this factor.
565
+
566
+ Scaling laws and performance on downstream tasks. Similar to previous scaling law studies, our analysis focuses on pretraining performance as measured by the validation loss. However, the extent to which these findings translate to downstream performance remains an open question and requires further investigation.
567
+
568
+ Extrapolation to larger scales. The accuracy of scaling law predictions improves with increasing FLOPs Appendix C. Furthermore, we validate our laws when extrapolating to larger model sizes (Appendix C.6). However, whether these laws can be reliably extrapolated to extremely large model sizes remains an open question.
569
+
570
+ High resolution and early-fusion models. Training early-fusion models with high-resolution inputs leads to a significant increase in vision tokens. While pooling techniques have been widely adopted for late-fusion models, alternative approaches may be necessary for early fusion. Given the similarity of early-fusion models to LLMs, it appears
571
+
572
+ that techniques for extending context length could be beneficial.
573
+
574
+ Scaling laws for multimodal MoE models. For MoEs, we consider only a single configuration (top-1 routing with 8 experts). We found this configuration to work reasonably well in our setup, and we follow a standard MoE implementation. However, the findings may vary when further optimizing the MoE architecture or exploring different load-balancing schemes, routing strategies, or expert implementations.
575
+
576
+ # E. Mixture of experts and modality-specific specialization
577
+
578
+ # E.1. MoEs configuration
579
+
580
+ We experiment with different MoE configurations by changing the number of experts and the top-k. We report a sample of these experiments in Table 13.
581
+
582
+ # E.2. MoEs specialization
583
+
584
+ ![](images/0457adae4a061cd434da8b673b2881180bd66aaec1951b226848f1504508a81e.jpg)
585
+ Figure 25. Modality-specific specialization. We visualize expert specialization to the text and image modalities. Models are evaluated on Obelics.
586
+
587
+ We investigate multimodal specialization in MoE architectures. We compute a specialization score as the average difference between the fraction of text/image tokens assigned to each expert and a uniform assignment $(1/E)$. Additionally, we visualize the normalized number of text and image tokens assigned to each expert across layers. Figure 25 shows clear modality-specific experts, particularly in the early layers. Furthermore, the specialization score decreases with depth but rises again in the very last layers. This suggests that early and final layers require more modality specialization than mid-layers. Additionally, we observe several experts shared between the text and image modalities, a phenomenon not present in hard-routed or predefined modality-specific experts.
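A minimal sketch of this per-layer specialization score, assuming per-expert token counts for each modality; the exact normalization (averaging over the two modalities) is our assumption:

```python
import numpy as np

def specialization_score(text_counts, image_counts):
    """Mean absolute deviation of each expert's share of text and image tokens
    from the uniform assignment 1/E, averaged over the two modalities.
    text_counts, image_counts: arrays of shape (E,) with per-expert token totals."""
    text_counts = np.asarray(text_counts, dtype=float)
    image_counts = np.asarray(image_counts, dtype=float)
    E = len(text_counts)
    text_share = text_counts / text_counts.sum()
    image_share = image_counts / image_counts.sum()
    deviation = np.abs(text_share - 1.0 / E) + np.abs(image_share - 1.0 / E)
    return deviation.mean() / 2.0
```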
588
+
589
+ <table><tr><td colspan="3">L(N,D) = E + A/Nα + B/Dβ</td><td>vs</td><td colspan="7">L(N,D,S) = A/Nα + B/Dβ + C(1-S)λ + d(1-S)δNγ</td></tr><tr><td>Model</td><td>E</td><td>A</td><td>B</td><td>α</td><td>β</td><td>λ</td><td>δ</td><td>γ</td><td>C</td><td>d</td></tr><tr><td>L(N,D) (Eq 1)</td><td>2.158</td><td>381773</td><td>4659</td><td>0.710</td><td>0.372</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>L(N,D,S) [2]</td><td>1.0788</td><td>1</td><td>4660</td><td>0.5890</td><td>0.3720</td><td>0.2</td><td>0.2</td><td>0.70956</td><td>1.0788</td><td>381475</td></tr></table>
590
+
591
+ Table 12. Scaling laws for sparse native multimodal models.
592
+
593
+ <table><tr><td rowspan="2"></td><td colspan="6">Accuracy</td><td colspan="2">CIDEr</td></tr><tr><td>AVG</td><td>VQAv2</td><td>TextVQA</td><td>OKVQA</td><td>GQA</td><td>VizWiz</td><td>COCO</td><td>TextCaps</td></tr><tr><td>4-E-top-1</td><td>40.0552</td><td>64.068</td><td>14.284</td><td>41.948</td><td>61.46</td><td>18.516</td><td>62.201</td><td>34.08</td></tr><tr><td>8-E-top-1</td><td>41.6934</td><td>65.684</td><td>17.55</td><td>42.908</td><td>63.26</td><td>19.065</td><td>67.877</td><td>39.63</td></tr><tr><td>8-E-top-2</td><td>42.8546</td><td>66.466</td><td>19.162</td><td>45.344</td><td>63.94</td><td>19.361</td><td>65.988</td><td>41.649</td></tr><tr><td>8-E-top-2 finegrained</td><td>39.904</td><td>62.76</td><td>15.58</td><td>41.88</td><td>61.6</td><td>17.7</td><td>57.52</td><td>35.42</td></tr></table>
594
+
595
+ Table 13. SFT results with different MoE configurations.
596
+
597
+ ![](images/925871664600fa369934dd20a2f34c9ad334d39d98206e080eb90db5737c852f.jpg)
598
+ Figure 26. Scaling laws for native multimodal models. From left to right: late fusion (dense), early fusion (dense), and early-fusion MoEs. The scaling exponents are very close for all models. However, MoEs lead to an overall lower loss (smaller multiplicative constant) and take longer to saturate.
599
+
600
+ ![](images/a5eae89bad3ce176fd8a9fbfe3fb8e612accba59e9227da594fe7f8363a3bd57.jpg)
601
+
602
+ ![](images/92ad024f678fc1ec2ce39f984eafbe6ca1eaeb8ab89672d5e361993c173ed68b.jpg)
603
+
604
+ ![](images/7445330c28293501507c7a1de3b802c291b7e7d4af6025b3396fca03b97b7816.jpg)
605
+
606
+ ![](images/fb070c10fca1f4655ac4f3b29724b2851fad05ebde2ebfed26e2f7526c0576ed.jpg)
607
+
608
+ ![](images/2298a5bd5a365cb44cab1b103fb156142e541e4de0c91847da088ebeb6d5772b.jpg)
609
+
610
+ ![](images/170aa90031c86716a60fdb8bca8d1f9c6abc83012c624d6aa092ecd2d1457844.jpg)
611
+
612
+ ![](images/7229c0205223fd2165a40f38f790f64f89c1c7601693a992b739a3d796f80bf7.jpg)
613
+ Figure 27. Scaling laws for native multimodal models. From top to bottom: late-fusion (dense), early-fusion (dense) and early-fusion MoEs. From left to right: cross-entropy on the validation set of image-caption, interleaved and text-only data.
614
+
615
+ ![](images/87a7df1c2a6cde40a771564ee46e3757cc1a708b33a759bba95feacc6350135a.jpg)
616
+
617
+ ![](images/b7303f5247e7b6e588dfa05e005f69657763ba42c509db0cf3096a7e8f1b8e8d.jpg)
618
+
619
+ <table><tr><td>0.289B</td><td>0.494B</td><td>1B</td><td>1.748B</td><td>2.430B</td><td>3.714B</td></tr><tr><td>0.275B</td><td>0.464B</td><td>0.932B</td><td>1.627B</td><td>2.280B</td><td>3.354B</td></tr><tr><td>0.275B</td><td>0.464B</td><td>0.932B</td><td>1.627B</td><td>2.280B</td><td>3.354B</td></tr></table>
620
+
621
+ ![](images/091e908750bec32dc612b24b77318d4392a00579c8de5564bd1f4aaeac00cffe.jpg)
622
+
623
+ ![](images/f1d1d1a9ae09d693813d534ed3e52995b2b53c52b57e1dea64039a244f0d113b.jpg)
624
+ 45-45-10
625
+ 40-20-40
626
+
627
+ ![](images/58b8a90492785ea6eb48f59e69bdee2ab2ead0444b721b6d7b2d5a5ac625d566.jpg)
628
+
629
+ ![](images/cb4ae73517140700ebf19aea43a4b90376fd6e9529aca7bd298171d3300e1774.jpg)
630
+
631
+ ![](images/ea60f82ab55e1707f5708b66a23159e1bf846ea0bd5d17664d67a614b59b36da.jpg)
632
+ 30-30-40
633
+
634
+ ![](images/fdb45bc5fa88e9e4889729e2053cff1cee23f3c95a045183987d941d85b99456.jpg)
635
+
636
+ ![](images/d1f9ebe2963b7671652b537ba3995aa73ec10a81a9fce26f78cb63d714e35caf.jpg)
637
+
638
+ ![](images/3ae00f36a4a66bc014814831c85b16c3b2c789a86ea8281e5e74facae8c1ca14.jpg)
639
+ 20-40-40
640
+
641
+ ![](images/af0628a1ce2945b362e0e67e09294f4be2d6a8597dc24e17cd4ce6c346c9a960.jpg)
642
+
643
+ ![](images/80f27c4c3ad4d2f64ee10c2d8bb2923fb6dd8bdd2496f979ee4083e210d57191.jpg)
644
+
645
+ ![](images/97f7d3f77f2b4f693354d3f8412b5275b071e1b828e08f12a883241d15b4b73f.jpg)
646
+
647
+ ![](images/11479b86f8758143bec163b9b3f731a9a13ce7f8d8af1f77885578d6fc23d4d9.jpg)
648
+
649
+ ![](images/20f8b6dd661ab17387d6361040a2dda735af8ad5b613ab0bc7dd00e0c1dfca59.jpg)
650
+ Figure 28. Scaling laws for early-fusion native multimodal models. Our runs across different training mixtures (Image-caption-Interleaved-Text) and FLOPs. We visualize the final validation loss on 3 data types: HQITP (left), Obelics (middle) and DCLM (right).
data/2025/2504_07xxx/2504.07951/images/017cd4e44d6041fbf5d92d6449f51dc545d6432fffd337fa70b94d0dc159c6e0.jpg ADDED

Git LFS Details

  • SHA256: c299be90f4d08a9de210a762e0aa6723adb0bdf5c20e440443285a2a8dc7a513
  • Pointer size: 130 Bytes
  • Size of remote file: 15.6 kB
data/2025/2504_07xxx/2504.07951/images/045203ceb8b4e4655a60480c74ed6b69e687bea09891aca71efb66fa919250c1.jpg ADDED

Git LFS Details

  • SHA256: e59c46793a8db13f80324dba5a172f9366419d0a66d70f875bb5fa6dbd7fc00c
  • Pointer size: 130 Bytes
  • Size of remote file: 30.7 kB
data/2025/2504_07xxx/2504.07951/images/0457adae4a061cd434da8b673b2881180bd66aaec1951b226848f1504508a81e.jpg ADDED

Git LFS Details

  • SHA256: 3e3587a9a919b215d401866418a33aa0f0f531d7246cab393e6fd6489d92b65b
  • Pointer size: 130 Bytes
  • Size of remote file: 23.9 kB
data/2025/2504_07xxx/2504.07951/images/05809573792c53ca70b02f70c994411ae668a3b5b2344a38b3d284b30400e43d.jpg ADDED

Git LFS Details

  • SHA256: 0e009010ee4e055a0c2065e0255dbb23628bde4430c65dba21a390035a2a0130
  • Pointer size: 130 Bytes
  • Size of remote file: 32.6 kB
data/2025/2504_07xxx/2504.07951/images/091e908750bec32dc612b24b77318d4392a00579c8de5564bd1f4aaeac00cffe.jpg ADDED

Git LFS Details

  • SHA256: 43fd38dc1e656d0523ac970e360a36721f76885b8c134835e9ef3d1295c945dd
  • Pointer size: 130 Bytes
  • Size of remote file: 20.4 kB
data/2025/2504_07xxx/2504.07951/images/10dbbf3ec8073a52df28e88684bc9882d3a1ce36b6a9552bc7fb858ad7c9543e.jpg ADDED

Git LFS Details

  • SHA256: 57dafef267170d559349ef6c25c23b5f6d61e4296a4ec0a1228432360b1401f8
  • Pointer size: 130 Bytes
  • Size of remote file: 25.2 kB
data/2025/2504_07xxx/2504.07951/images/11479b86f8758143bec163b9b3f731a9a13ce7f8d8af1f77885578d6fc23d4d9.jpg ADDED

Git LFS Details

  • SHA256: 13ecb42111eed39a7e22ce87e2a3d62ac2678cbbd18caf691c39ea2d385ee7ca
  • Pointer size: 130 Bytes
  • Size of remote file: 19.5 kB
data/2025/2504_07xxx/2504.07951/images/1249bd8d2556f58d1f41ad80f0ef011fad2d11b1c4b68a40881aad72f6f4d8f5.jpg ADDED

Git LFS Details

  • SHA256: 20299e4ff31ef9beb8023b5f2f1987ade5176fcf7687b78f1e88d47adacd0bdc
  • Pointer size: 130 Bytes
  • Size of remote file: 10.3 kB
data/2025/2504_07xxx/2504.07951/images/12a7acbd253e8fc8060bb23066911da65f40d736f2b8fbbf41ef5b64ea350b44.jpg ADDED

Git LFS Details

  • SHA256: 25f3c6df6865ff16443300c50bcc48386e74c378cb9759f54fc96c5b4b1cba7e
  • Pointer size: 129 Bytes
  • Size of remote file: 7.93 kB
data/2025/2504_07xxx/2504.07951/images/130ab8d89bfbb0daec936aeeb6ae15603aaee2a3b5a736c530f7a7e8d6b259fe.jpg ADDED

Git LFS Details

  • SHA256: fa7e5b97cf4989dc983afb9ef466c50db9fdbf30d679141c508ec4e62ac12117
  • Pointer size: 130 Bytes
  • Size of remote file: 10.1 kB
data/2025/2504_07xxx/2504.07951/images/151c20de476ef01f160a8055c4f9468eef0286edcc9f11c199662da996e79625.jpg ADDED

Git LFS Details

  • SHA256: 720fb9e8f3f79b40801bf394e74776eef85e057fc8a82e6dba462fda812940f7
  • Pointer size: 130 Bytes
  • Size of remote file: 18.3 kB
data/2025/2504_07xxx/2504.07951/images/163a2ab479abcc17e9353749f796560b4c9b3868903e73a7bd4261351ac385ac.jpg ADDED

Git LFS Details

  • SHA256: b2778d89edb5349a005d8af9cd8ca186a78feac5e04ec558e3a0ef3b1d5eba7f
  • Pointer size: 129 Bytes
  • Size of remote file: 8.14 kB
data/2025/2504_07xxx/2504.07951/images/170aa90031c86716a60fdb8bca8d1f9c6abc83012c624d6aa092ecd2d1457844.jpg ADDED

Git LFS Details

  • SHA256: 53864266b05762bec23855b03935fdcfd5b35170b34965cff8f8c438568bbd02
  • Pointer size: 130 Bytes
  • Size of remote file: 18.5 kB
data/2025/2504_07xxx/2504.07951/images/19bb36aad451a1cd656099f17e52e909bfb31aefe24539cfdfbdde715fde3f60.jpg ADDED

Git LFS Details

  • SHA256: ee4845340080f9640f5a78d7dd8f6184a83146f6232b5220c40630e91a2c0057
  • Pointer size: 130 Bytes
  • Size of remote file: 21.8 kB
data/2025/2504_07xxx/2504.07951/images/1f84b67b5d7cfa4cb69e5df7625489543ef35e1179c8f78db4afa37ad99ee036.jpg ADDED

Git LFS Details

  • SHA256: 9e141cddf77f03676d482402e4caa7d819cab22b470a6646cf0238ce7268f9fa
  • Pointer size: 129 Bytes
  • Size of remote file: 8.54 kB
data/2025/2504_07xxx/2504.07951/images/20c4e076762da2e42468513928ebe646d8ecfb3fe53aae03d0f904e8b15de96b.jpg ADDED

Git LFS Details

  • SHA256: ca85a7b6f288f15890f21fa9a222ebb1e853f305df2e3e747df5e5da2d205c49
  • Pointer size: 130 Bytes
  • Size of remote file: 15.1 kB
data/2025/2504_07xxx/2504.07951/images/20f8b6dd661ab17387d6361040a2dda735af8ad5b613ab0bc7dd00e0c1dfca59.jpg ADDED

Git LFS Details

  • SHA256: 57ba7cd72485efe6551b5f4d2c51fd4d13395a078a13ce9588216a18bec1eb3c
  • Pointer size: 129 Bytes
  • Size of remote file: 9.34 kB
data/2025/2504_07xxx/2504.07951/images/21e8fdc385d8e3156ac6fa8c201c6a66e47d61ac4cf92ccd51951f6d42c2f9ef.jpg ADDED

Git LFS Details

  • SHA256: ddb95b66ec672b8cf0afc45ced9c05eff3bfe484dde7941e43a3c684599593a3
  • Pointer size: 129 Bytes
  • Size of remote file: 9.56 kB
data/2025/2504_07xxx/2504.07951/images/2298a5bd5a365cb44cab1b103fb156142e541e4de0c91847da088ebeb6d5772b.jpg ADDED

Git LFS Details

  • SHA256: 8c22ea3c3c957fbadc689a3d6a4db0510210a4894141d69f1a6ff47fe545046b
  • Pointer size: 130 Bytes
  • Size of remote file: 19.7 kB
data/2025/2504_07xxx/2504.07951/images/29d9735919725580929983c6cf0f1e57af47d8b28095af285fee7f7e08e14bfc.jpg ADDED

Git LFS Details

  • SHA256: 001f6cad6611bdebdd14bfa351403787680ec01cc5dae2178aef7f6abf36d6ec
  • Pointer size: 130 Bytes
  • Size of remote file: 18.3 kB
data/2025/2504_07xxx/2504.07951/images/2c3c0faf8744d893068c96328767e8b3cb6814d50ea95a957ae80e790bdaa99e.jpg ADDED

Git LFS Details

  • SHA256: 9730c4156b3f954351ec4c8c70e38ecea6a4708e483747297c048cdb7e50fdf0
  • Pointer size: 130 Bytes
  • Size of remote file: 68.4 kB
data/2025/2504_07xxx/2504.07951/images/30f0b77b47bad4996304ae3eb682ca659d3838eb5fdb97502507841b6fa9a450.jpg ADDED

Git LFS Details

  • SHA256: 2cd5b165e0775e023e82abfbe2a2e6fb9a16c6b15284da35157904489c6bcdd9
  • Pointer size: 130 Bytes
  • Size of remote file: 13.3 kB
data/2025/2504_07xxx/2504.07951/images/3158294d4bed882c5795c15d51f30187e0425323247a897fc1f73162cbb2ca5e.jpg ADDED

Git LFS Details

  • SHA256: ff22de7ee42b98619b851be725cb0f33876754d8e6c2f54561f454922d4d183b
  • Pointer size: 130 Bytes
  • Size of remote file: 18.3 kB
data/2025/2504_07xxx/2504.07951/images/318e688155ccd183d4e1428cd31f0225ef444eeaf637aece6b3e58c56e5d7812.jpg ADDED

Git LFS Details

  • SHA256: 58648264ead285999759e962b2db15ace371e13fe3379ce8d8f388db17717a87
  • Pointer size: 129 Bytes
  • Size of remote file: 3.89 kB
data/2025/2504_07xxx/2504.07951/images/35b33667f30401245922f03ec33eabdee73856313eeb1047a34b45938190086e.jpg ADDED

Git LFS Details

  • SHA256: fd18a4827977af4d1d1ad797a0e6d44cc60b277a8926a4e57064b952d2669ae3
  • Pointer size: 130 Bytes
  • Size of remote file: 17.1 kB
data/2025/2504_07xxx/2504.07951/images/3608745d85340830d3714b391c6401e08ae575b7c2858fdf864af7cb9b124f87.jpg ADDED

Git LFS Details

  • SHA256: 5a1a6c54759c005e5f718dda1515156ba10a0f12d49c5df18ed17133698a1a43
  • Pointer size: 130 Bytes
  • Size of remote file: 44.2 kB
data/2025/2504_07xxx/2504.07951/images/38e56edac0d3be48f9d017b5be0ecba6837647563e6fba8f0f05f1b2c5885b56.jpg ADDED

Git LFS Details

  • SHA256: e214c8e6c9ab997910939ae09d3bd4c74879ec51b11566a2019bd901c9c483b7
  • Pointer size: 130 Bytes
  • Size of remote file: 35.1 kB
data/2025/2504_07xxx/2504.07951/images/3ae00f36a4a66bc014814831c85b16c3b2c789a86ea8281e5e74facae8c1ca14.jpg ADDED

Git LFS Details

  • SHA256: 51b83f693ab4d342cf4cdf1d70b11aa028bc303383da261b51bb972de9ca5901
  • Pointer size: 130 Bytes
  • Size of remote file: 19.8 kB
data/2025/2504_07xxx/2504.07951/images/4133a9cbf5fb81e279a53a22c25cc8e1a31cad79eb8d7f96cf9c1d0cbaf75ebd.jpg ADDED

Git LFS Details

  • SHA256: 7d7cb57b252f9b10c5e7ce6ee730b68dd2f98e3187b8878a7e88386bcfa2a8b9
  • Pointer size: 130 Bytes
  • Size of remote file: 11.7 kB
data/2025/2504_07xxx/2504.07951/images/45331be5dffd8bf1906423e01879dee5d3a816ec501329e152690ebd1e24e59f.jpg ADDED

Git LFS Details

  • SHA256: f5241c7dd1680ce9063461e7d99a59802d5b217ed53e3fa59d3def96d5329550
  • Pointer size: 129 Bytes
  • Size of remote file: 7.84 kB
data/2025/2504_07xxx/2504.07951/images/48ddff1697fd13cb0e2777d4878ca97b8b1c84373b2fd14b4c89423c97881f5b.jpg ADDED

Git LFS Details

  • SHA256: caebbcafeb45b4780d0591eb52c4ee4adb1b6d262049bcec14ff3aaa2d022a63
  • Pointer size: 130 Bytes
  • Size of remote file: 16.4 kB
data/2025/2504_07xxx/2504.07951/images/4940c0e3eb2f8c85622a677039121632355bd85038b5913c73fa6ec971513244.jpg ADDED

Git LFS Details

  • SHA256: c7a187892bca234898962bfab0289e7bfc7887121bf1ce9579f39751b9066e6a
  • Pointer size: 130 Bytes
  • Size of remote file: 16 kB
data/2025/2504_07xxx/2504.07951/images/4a1035b9a18a960ba7e2cd7aca5a49f2e04a75b6df481835ae8d4417804fcb2d.jpg ADDED

Git LFS Details

  • SHA256: 52bd688305eec5a9104826d11933e4d853adc0122cb96d4f13a3090344047efa
  • Pointer size: 130 Bytes
  • Size of remote file: 28 kB
data/2025/2504_07xxx/2504.07951/images/4dde252ff0946067af23011ac2b4db6aa4011b6c34fbfbc6549057a599da712d.jpg ADDED

Git LFS Details

  • SHA256: 2e15cc51c0d02e9fefb97cca8ef6d9c37fe1be4dc67c60e8aef9b551726a2533
  • Pointer size: 130 Bytes
  • Size of remote file: 21.7 kB
data/2025/2504_07xxx/2504.07951/images/4e86f0374868e29a9f27723f7ab7e8a000db7028e3af86023ac4513fa225732c.jpg ADDED

Git LFS Details

  • SHA256: 6619689f4ef83741e82dc6b61eb3060430041efe5e0d13d315508405f6988b0b
  • Pointer size: 130 Bytes
  • Size of remote file: 22.2 kB
data/2025/2504_07xxx/2504.07951/images/58b8a90492785ea6eb48f59e69bdee2ab2ead0444b721b6d7b2d5a5ac625d566.jpg ADDED

Git LFS Details

  • SHA256: a5a09e10ab45b6c1396ad9cbc4588e1163a242409f2fd450f222b3a50ed1be18
  • Pointer size: 130 Bytes
  • Size of remote file: 19.5 kB
data/2025/2504_07xxx/2504.07951/images/5c5d809252a6557e3554685ef21f3f3bb5c397b4474746975fe77434e16c52b3.jpg ADDED

Git LFS Details

  • SHA256: c585017e193f5843e168afcdf5219383df7541021a5dca8afb1603b6250bb6fe
  • Pointer size: 130 Bytes
  • Size of remote file: 80.2 kB
data/2025/2504_07xxx/2504.07951/images/5d510b88dd42b8ddd6e4d6b000a38f3524b9b76ce933ceb6ce23cf3dbff52932.jpg ADDED

Git LFS Details

  • SHA256: 1964563c05b2828176dc6e3ccc2be70dd179418e328d02838365a0a836d2c9ae
  • Pointer size: 130 Bytes
  • Size of remote file: 15.2 kB
data/2025/2504_07xxx/2504.07951/images/5f8f59f429c09150c5fcb8b66558a02dcabcfe38634d5ce492da13a607485231.jpg ADDED

Git LFS Details

  • SHA256: 9cbcea4e2d0d5b251132af1fd91ee5fc056d307b3e6c46a4b20f9353b3558c72
  • Pointer size: 129 Bytes
  • Size of remote file: 9.26 kB
data/2025/2504_07xxx/2504.07951/images/62cf5e7b98b47d2792dc6b7fe326f249af87efb09ffa6f45b0a87b47d5481909.jpg ADDED

Git LFS Details

  • SHA256: 0424a3f313d7659a5530528e3d40ccb6023f13f6cce5354d9966de3ddd6b02c6
  • Pointer size: 130 Bytes
  • Size of remote file: 12.7 kB
data/2025/2504_07xxx/2504.07951/images/675dbf1f4b9b9be1ca72b30b1a7af76972f55649829f8678333c939553f2b746.jpg ADDED

Git LFS Details

  • SHA256: 1923c073b5a435cd1ccb707120c9f4d4f8fd4fcc5da7d1d9fa5f7311a86d550e
  • Pointer size: 130 Bytes
  • Size of remote file: 15.2 kB
data/2025/2504_07xxx/2504.07951/images/6f61e1f3f53e3ddd36f12868abec144cf60c2677463e6e9ba5cb4b0e8e6d8985.jpg ADDED

Git LFS Details

  • SHA256: 34d661d5a77e2383a44d093491116602b92130c0e442ee546ffff69c888eaba0
  • Pointer size: 130 Bytes
  • Size of remote file: 16.2 kB
data/2025/2504_07xxx/2504.07951/images/7229c0205223fd2165a40f38f790f64f89c1c7601693a992b739a3d796f80bf7.jpg ADDED

Git LFS Details

  • SHA256: 8abbf46a89c1cbf75d66a07e5a4eedc0e7aaf036c3742d6a98e13416b5d604cc
  • Pointer size: 130 Bytes
  • Size of remote file: 22.1 kB
data/2025/2504_07xxx/2504.07951/images/7445330c28293501507c7a1de3b802c291b7e7d4af6025b3396fca03b97b7816.jpg ADDED

Git LFS Details

  • SHA256: f4f14ff1a3a8d341c960a9f003760cc078d3594c59741697b99f725d88efee34
  • Pointer size: 130 Bytes
  • Size of remote file: 18.5 kB
data/2025/2504_07xxx/2504.07951/images/768487d1cb2c067b0f6e62f53328b62a8037bbd733461c754d3ae55f1b154cd2.jpg ADDED

Git LFS Details

  • SHA256: 780bfa1d35ea5f06e00e16bfa00f1ab4422c9e69e8437783cba6023ea47b7ac1
  • Pointer size: 130 Bytes
  • Size of remote file: 16.5 kB