Chelsea707 committed on
Commit 04ec967 (verified)
1 Parent(s): 41e3cbd

MinerU Batch 5421be69-0755-4e96-8394-e2f3dca8a1ab (Part 2/8)

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. .gitattributes +8 -0
  2. data/2025/2504_16xxx/2504.16915/b12d68de-23e6-462d-aa98-79da46044045_content_list.json +0 -0
  3. data/2025/2504_16xxx/2504.16915/b12d68de-23e6-462d-aa98-79da46044045_model.json +0 -0
  4. data/2025/2504_16xxx/2504.16915/b12d68de-23e6-462d-aa98-79da46044045_origin.pdf +3 -0
  5. data/2025/2504_16xxx/2504.16915/full.md +476 -0
  6. data/2025/2504_16xxx/2504.16915/images/035079926d46f2f8da63126c0b7a2b7d29bc12783cfe5d59e518e6b0f5722fd8.jpg +3 -0
  7. data/2025/2504_16xxx/2504.16915/images/0432a07ef93557345d5410e55f0a84064c863687eec35f8deda9a56c7fbb8dce.jpg +3 -0
  8. data/2025/2504_16xxx/2504.16915/images/07260139273f514c42045fe956c86ba1fd6e8fa298241d0ec6b0b61e9341e0d8.jpg +3 -0
  9. data/2025/2504_16xxx/2504.16915/images/0744fe79fd2b947138ea8f8826934b0d09f3393cbcac54c6325e4593e078cdb9.jpg +3 -0
  10. data/2025/2504_16xxx/2504.16915/images/09faad96bf101550a3b05cbc6476d051043ed07a04555bf067635a8647db808e.jpg +3 -0
  11. data/2025/2504_16xxx/2504.16915/images/0a37a84c34bcf97d4ae65a0d82bbcfc39b811e9371dcc7a49b0adb367688db03.jpg +3 -0
  12. data/2025/2504_16xxx/2504.16915/images/0f7ddb68b6175778bbefe5618e3f4f6ef9f17d91089c4a31b3703ec1b1f38160.jpg +3 -0
  13. data/2025/2504_16xxx/2504.16915/images/133116da6d91d0149199e87f7ec2c280eab6112a92179f3df95f558eb764b680.jpg +3 -0
  14. data/2025/2504_16xxx/2504.16915/images/19a5b372324eb3eb330e76b773d7bde6ca02dc4952249c8a77ceb8c6829803a2.jpg +3 -0
  15. data/2025/2504_16xxx/2504.16915/images/1eb7bb537257f5472de95bbe7ca0af4248c5292d4a57eaa775dfb7317bfe0a0d.jpg +3 -0
  16. data/2025/2504_16xxx/2504.16915/images/279495053316c8b020c2dd35bebec211f19221ae503c48c7d49c50c5ba115002.jpg +3 -0
  17. data/2025/2504_16xxx/2504.16915/images/34a7b121779591bdaa232e74d32181ee34cb59ac1f76781e9cfd988ee9d3fd36.jpg +3 -0
  18. data/2025/2504_16xxx/2504.16915/images/39e7331373b2d07820d10133371317fc8d79b6d0906717822d02bab35db0cab9.jpg +3 -0
  19. data/2025/2504_16xxx/2504.16915/images/3b186c98eb21f754d119318ae4505a43cf224efab1432d1b2f309745e0bd26d0.jpg +3 -0
  20. data/2025/2504_16xxx/2504.16915/images/3d7c45991e96321253eeedc99a23a4aee4c99f044b2529919423e4a4e67dfa1d.jpg +3 -0
  21. data/2025/2504_16xxx/2504.16915/images/3f9b4e74553651c0ddbe13cd6321ae260acf1f75f8d0f0f4a5f81965d6a3dc84.jpg +3 -0
  22. data/2025/2504_16xxx/2504.16915/images/3f9e1f0507ce8abd48a60c47b843dca5cf96a834f02b50c5e45888c4019f1281.jpg +3 -0
  23. data/2025/2504_16xxx/2504.16915/images/4712a1a51ee9fd15a3fa96ea8683d6dfb530235f43d3ce9a836e3e12fb10be98.jpg +3 -0
  24. data/2025/2504_16xxx/2504.16915/images/48d6ce06c9e3f7d066c086ec66a63afd5e45fa6e67cae473db8a93dc32b5bee6.jpg +3 -0
  25. data/2025/2504_16xxx/2504.16915/images/512df60b48f29d339264abecf08830607357111e99bc45ac5d7dd6f4c81337d7.jpg +3 -0
  26. data/2025/2504_16xxx/2504.16915/images/59708c463d8b169038e1962f79cb519893304ca0a2b11d0b59026a5bfe791ce1.jpg +3 -0
  27. data/2025/2504_16xxx/2504.16915/images/6759b15ae1941017d09b2e234f99ae239f6ad90c61b12df9930421dd0689ad11.jpg +3 -0
  28. data/2025/2504_16xxx/2504.16915/images/6d6f9a7f0b69294238f41794540c52bba98be7b4630308b5e75e65c7530d1ab7.jpg +3 -0
  29. data/2025/2504_16xxx/2504.16915/images/6eeeee20f5cd2e9bce0722f58c88de082019986540f7c8f47f008cd9f840d613.jpg +3 -0
  30. data/2025/2504_16xxx/2504.16915/images/78d1e4bd0466cad344cac7d4141bd55bd2e9cb79871f51594e29aada63a1240a.jpg +3 -0
  31. data/2025/2504_16xxx/2504.16915/images/7b2a214f6fbc2c844a88ddcd4c48bf5d306ef500f8d7673ec7ba6a80caed4eb0.jpg +3 -0
  32. data/2025/2504_16xxx/2504.16915/images/7bb98310f45445acde8f349cf99527a0253e1c53a050d68a0812a68546789b7d.jpg +3 -0
  33. data/2025/2504_16xxx/2504.16915/images/83aa1c7a0518f1b56ac9817c9af2ed29f5d037a45aaa3f7c69c00cf2a36d6728.jpg +3 -0
  34. data/2025/2504_16xxx/2504.16915/images/87feb1edfe09a23211bbce95806941ffc02f28b39c9fb9d8d2a1c0ed450cb5d9.jpg +3 -0
  35. data/2025/2504_16xxx/2504.16915/images/8869e13e02c742ca66cab98f4bbd2b65a66bc17b61193d446b30ba1e56f3649f.jpg +3 -0
  36. data/2025/2504_16xxx/2504.16915/images/88ea109eb1ccbf381ca0aa3e52d19e4fc8614c363f1226a32695f7229c632802.jpg +3 -0
  37. data/2025/2504_16xxx/2504.16915/images/8a50704bbd8d4bcd63432205dfb35bb65173b618c455b328507ccfd92595fbd6.jpg +3 -0
  38. data/2025/2504_16xxx/2504.16915/images/8dbf023ee0aa591dc1781c1e102ced18c6364b930ef5ff055a9ae5a12fb123d9.jpg +3 -0
  39. data/2025/2504_16xxx/2504.16915/images/8e592aa193623242c25a71bb25b65a80211ad006043b22c6da9512ab81073638.jpg +3 -0
  40. data/2025/2504_16xxx/2504.16915/images/8f9bef8c1c0048754e93ca438422aa05e02fc97d2186ec338f23f0c557d87e83.jpg +3 -0
  41. data/2025/2504_16xxx/2504.16915/images/917dbb84a91b58d1b0e0eb31d1a6f488a793775e5842ef7d43caebbb9f2279fc.jpg +3 -0
  42. data/2025/2504_16xxx/2504.16915/images/9451fc131c15f6204b3bd192f65fea00354fa43b91f156e8fe8b7823cea3216c.jpg +3 -0
  43. data/2025/2504_16xxx/2504.16915/images/9b995ccb1ac20db173f09243ec34675af732723ed93d62cd0372b311174c4936.jpg +3 -0
  44. data/2025/2504_16xxx/2504.16915/images/a957a9c5df9c4a4746bf7469038ac30d1efbbaaeade4b2095e4bf9c8f172f86a.jpg +3 -0
  45. data/2025/2504_16xxx/2504.16915/images/aeb2b636c720541aaa9b57d12f25842ba7046487006e746bac70140cf3860a0e.jpg +3 -0
  46. data/2025/2504_16xxx/2504.16915/images/b3f856dddb811ccd38bba81c312339a9127f5f73f20ce41272e38f90ef08355e.jpg +3 -0
  47. data/2025/2504_16xxx/2504.16915/images/b5cf1d26ce8ea9c42b6e64aac574a867e2cbe4885ce5de12f731901c7fa4f223.jpg +3 -0
  48. data/2025/2504_16xxx/2504.16915/images/b7ed295f87b9d59e7150dcf77881c1c4305daaeed8f846da0a18fc0b8447e890.jpg +3 -0
  49. data/2025/2504_16xxx/2504.16915/images/ba88c876e371b5384caf1ad00462d7f9a10f96dc6262f35dd6909224ae83ed7b.jpg +3 -0
  50. data/2025/2504_16xxx/2504.16915/images/c03680d5e4b522a6c5c6dfcb68dfee64fcb6ad2fabcd589ae50215cf3f1b3158.jpg +3 -0
.gitattributes CHANGED
@@ -957,3 +957,11 @@ data/2025/2504_17xxx/2504.17669/3faade3f-c51e-4212-bcac-6690621a4713_origin.pdf
957
  data/2025/2504_17xxx/2504.17674/016c9cea-4420-4d3f-8bbc-ef903804ac03_origin.pdf filter=lfs diff=lfs merge=lfs -text
958
  data/2025/2504_17xxx/2504.17704/78dc1944-bdab-4ea7-a307-6a3f4638ebb6_origin.pdf filter=lfs diff=lfs merge=lfs -text
959
  data/2025/2504_17xxx/2504.17761/b446d886-99e9-48fe-9fa0-d32d8b9080f6_origin.pdf filter=lfs diff=lfs merge=lfs -text
960
+ data/2025/2504_16xxx/2504.16915/b12d68de-23e6-462d-aa98-79da46044045_origin.pdf filter=lfs diff=lfs merge=lfs -text
961
+ data/2025/2504_16xxx/2504.16929/bb27996c-f3d3-42a1-9191-e2b420a7e83f_origin.pdf filter=lfs diff=lfs merge=lfs -text
962
+ data/2025/2504_16xxx/2504.16980/446176ce-26d5-467e-b536-ba6bf0820b99_origin.pdf filter=lfs diff=lfs merge=lfs -text
963
+ data/2025/2504_17xxx/2504.17033/e6450449-0ebd-4cc8-a005-dda74aad845e_origin.pdf filter=lfs diff=lfs merge=lfs -text
964
+ data/2025/2504_17xxx/2504.17192/c60649e4-2487-45dc-a9ed-0119155d89fd_origin.pdf filter=lfs diff=lfs merge=lfs -text
965
+ data/2025/2504_17xxx/2504.17207/8ecf96d8-b223-494a-9a42-c932a8d27087_origin.pdf filter=lfs diff=lfs merge=lfs -text
966
+ data/2025/2504_17xxx/2504.17343/4715aa03-ca2f-428d-bf66-e77b13bbe969_origin.pdf filter=lfs diff=lfs merge=lfs -text
967
+ data/2025/2504_17xxx/2504.17432/c2ec9565-d5a8-422f-a1dc-bef3536bf2ef_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2504_16xxx/2504.16915/b12d68de-23e6-462d-aa98-79da46044045_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_16xxx/2504.16915/b12d68de-23e6-462d-aa98-79da46044045_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_16xxx/2504.16915/b12d68de-23e6-462d-aa98-79da46044045_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c94067201a2ebb48b7c5fdf08d3e02f402809c969bb57e7fa3086535b09c31d
3
+ size 15474696
data/2025/2504_16xxx/2504.16915/full.md ADDED
@@ -0,0 +1,476 @@
1
+ # DreamO: A Unified Framework for Image Customization
2
+
3
+ CHONG MOU, Intelligent Creation Team, ByteDance; Peking University, China
4
+
5
+ YANZE WU\*†, Intelligent Creation Team, ByteDance, China
6
+
7
+ WENXU WU, Intelligent Creation Team, ByteDance, China
8
+
9
+ ZINAN GUO, Intelligent Creation Team, ByteDance, China
10
+
11
+ PENGZE ZHANG, Intelligent Creation Team, ByteDance, China
12
+
13
+ YUFENG CHENG, Intelligent Creation Team, ByteDance, China
14
+
15
+ YIMING LUO, Intelligent Creation Team, ByteDance, China
16
+
17
+ FEI DING†, Intelligent Creation Team, ByteDance, China
18
+
19
+ SHIWEN ZHANG, Intelligent Creation Team, ByteDance, China
20
+
21
+ XINGHUI LI, Intelligent Creation Team, ByteDance, China
22
+
23
+ MENGTIAN LI, Intelligent Creation Team, ByteDance, China
24
+
25
+ MINGCONG LIU, Intelligent Creation Team, ByteDance, China
26
+
27
+ YUNSHENG JIANG, Intelligent Creation Team, ByteDance, China
28
+
29
+ SHAOJIN WU, Intelligent Creation Team, ByteDance, China
30
+
31
+ SONGTAO ZHAO, Intelligent Creation Team, ByteDance, China
32
+
33
+ JIAN ZHANG, Peking University, China
34
+
35
+ QIAN HE, Intelligent Creation Team, ByteDance, China
36
+
37
+ XINGLONG WU, Intelligent Creation Team, ByteDance, China
38
+
39
+ ![](images/b5cf1d26ce8ea9c42b6e64aac574a867e2cbe4885ce5de12f731901c7fa4f223.jpg)
40
+
41
+ Fig. 1. The image customization capability of our proposed DreamO. Example panel prompts include "A boy wearing shorts ...", "Generate a same style image. A castle", "A man hold a box in the snowing ...", "Generate a same style image. A dog play in the park", "A girl rides a giant dog, walking in the noisy modern city", and "The woman held the toy above her head in the park".
86
+
87
+ *Correspondence: Yanze Wu <wuyanze123@gmail.com>
88
+
89
+ †Project leads
90
+
91
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the
92
+
93
+ author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
94
+
95
+ SA Conference Papers '25, December 15-18, 2025, Hong Kong, Hong Kong
96
+
97
+ © 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM.
98
+
99
+ ACM ISBN 979-8-4007-2137-3/2025/12
100
+
101
+ https://doi.org/10.1145/3757377.3763956
102
+
103
+ Recently, extensive research on image customization (e.g., identity, subject, style, background, etc.) demonstrates strong customization capabilities in large-scale generative models. However, most approaches are designed for specific tasks, restricting their generalizability to combine different types of condition. Developing a unified framework for image customization remains an open challenge. In this paper, we present DreamO, an image customization framework designed to support a wide range of tasks while facilitating seamless integration of multiple conditions. Specifically, DreamO utilizes a diffusion transformer (DiT) framework to uniformly process input of different types. During training, we introduce a feature routing constraint to facilitate the precise querying of relevant information from reference images. Additionally, we design a placeholder strategy that associates specific placeholders with conditions at particular positions, enabling control over the placement of conditions in the generated results. Moreover, we employ a progressive training strategy to ensure smooth model convergence and correct the generation quality of the final output. Extensive experiments demonstrate that the proposed DreamO can effectively perform various image customization tasks with high quality and flexibly integrate different types of control conditions. Project page: https://mc-e.github.io/project/DreamO
104
+
105
+ # CCS Concepts: Computing methodologies $\rightarrow$ Computer vision.
106
+
107
+ Additional Key Words and Phrases: Diffusion-models, Image customization
108
+
109
+ # ACM Reference Format:
110
+
111
+ Chong Mou, Yanze Wu, Wenxu Wu, Zinan Guo, Pengze Zhang, Yufeng Cheng, Yiming Luo, Fei Ding, Shiwen Zhang, Xinghui Li, Mengtian Li, Mingcong Liu, Yunsheng Jiang, Shaojin Wu, Songtao Zhao, Jian Zhang, Qian He, and Xinglong Wu. 2025. DreamO: A Unified Framework for Image Customization. In SIGGRAPH Asia 2025 Conference Papers (SA Conference Papers '25), December 15-18, 2025, Hong Kong, Hong Kong. ACM, New York, NY, USA, 12 pages. https://doi.org/10.1145/3757377.3763956
112
+
113
+ # 1 INTRODUCTION
114
+
115
+ Due to the high-quality image generation and stable performance of diffusion models [Ho et al. 2020], substantial research efforts focus on controllable generation by leveraging their generative priors [Mou et al. 2024; Zhang et al. 2023]. Among these, image customization aims to ensure that generated outputs remain consistent with a reference image in specific attributes, such as identity [Guo et al. 2024; Wang et al. 2024a; Xiao et al. 2024b], object appearance [He et al. 2025; Huang et al. 2024a; Ruiz et al. 2023], virtual try-on [Choi et al. 2024; Luan et al. 2025; Wan et al. 2025], and style [Qi et al. 2024; Wu et al. 2021; Xing et al. 2024]. Despite the abundance of task-specific approaches, developing a unified framework for image customization remains a challenge.
116
+
117
+ Early research such as Composer [Huang et al. 2023] jointly trains a diffusion model with multi-condition input, e.g., depth, color, and sketch. Some methods [Qin et al. 2023; Zhao et al. 2023] train additional control blocks [Mou et al. 2024; Zhang et al. 2023] to support general spatial control over the generation result, which greatly reduces training costs. However, their control ability is restricted to simple spatial conditions, and the interactions between conditions are rigid and redundant. Recently, the DiT [Peebles and Xie 2023] framework has greatly scaled up the performance of diffusion models. Based on DiT, OminiControl [Tan et al. 2024] proposes to integrate image conditions into a unified sequence with the diffusion latent. It can perform various customization tasks, e.g., identity, color, and layout. Despite its advantages, OminiControl is trained separately on different tasks, struggling to process multiple conditions. Recently, OmniGen [Xiao et al. 2024a] trains a general generation controller based on a pre-trained large language model (LLM) [Abdin et al. 2024]. UniReal [Chen et al. 2024b] achieves this through video generation pretraining followed by full-model post-training. However, we argue that high-quality, multi-concept image customization cannot be achieved by merely leveraging the general capabilities of large language models like OmniGen or video models such as UniReal. Instead, it requires specialized architectural designs. Currently, the research community lacks an efficient and effective method specifically tailored for image customization under multi-concept and multi-conditional scenarios.
120
+
121
+ In this paper, we design a unified image customization approach based on a pre-trained DiT. With a low training cost and a single model, our method can support various conditions (e.g., identity, subject, try-on, and style) and enables interactions among different kinds of conditions, as shown in Fig. 1. Specifically, we follow the unified sequence conditioning format introduced in OminiControl [Tan et al. 2024], and introduce a routing constraint on the internal representations of DiT during training. This ensures content fidelity and promotes the disentanglement of different control conditions. A placeholder strategy is also designed to enable control over the placement of conditions in the generated results. In addition, we construct large-scale training data covering multiple tasks and design a progressive training strategy. This enables the model to progressively acquire robust and generalized image customization capabilities.
122
+
123
+ In summary, this paper makes the following contributions:
124
+
125
+ - We propose DreamO, a unified framework for image customization. It achieves various complex and multi-condition customization tasks by training a small set of additional parameters on a pre-trained DiT model.
126
+ - Based on representation correspondences within the diffusion model, we design a feature routing constraint to enhance consistency fidelity and enable effective decoupling in multi-condition scenarios.
127
+ - We introduce a progressive training strategy to facilitate convergence in multi-task and complex task settings. Moreover, we design a placeholder strategy to establish correspondence between textual descriptions and condition images.
128
+ - Extensive experiments demonstrate that our method not only produces high-quality results in various image customization tasks, but also exhibits strong flexibility in multi-condition scenarios.
129
+
130
+ # 2 RELATED WORKS
131
+
132
+ # 2.1 Diffusion Models
133
+
134
+ As a powerful generation paradigm, the diffusion model [Dhariwal and Nichol 2021; Ho et al. 2020] rapidly dominates the image generation community. Its high generation quality and stable performance have been successfully applied to various tasks, e.g., text-to-image generation [Nichol et al. 2022; Ramesh et al. 2022; Rombach et al. 2022; Saharia et al. 2022a] and image editing [Hertz et al. 2022; Meng et al. 2021; Mou et al. 2023; Yang et al. 2023]. Some strategies, such as latent diffusion [Rombach et al. 2022] and flow matching sampling [Lipman et al. 2022], are proposed to enhance the performance. Most early works [Nichol et al. 2022; Rombach et al. 2022]
135
+
136
+ ![](images/a957a9c5df9c4a4746bf7469038ac30d1efbbaaeade4b2095e4bf9c8f172f86a.jpg)
137
+ Fig. 2. Overview of our proposed DreamO, which can uniformly handle commonly used consistency-aware generation control.
138
+
139
+ utilize the UNet architecture as the diffusion model. Recently, the diffusion transformer [Peebles and Xie 2023] (DiT) architecture has emerged as a superior choice, offering improved performance through straightforward scalability.
140
+
141
+ # 2.2 Controllable Image Generation
142
+
143
+ Advancements in diffusion models drive the rapid development of controllable image generation. In this community, text is the most fundamental conditioning input, e.g., Stable Diffusion [Rombach et al. 2022], Imagen [Saharia et al. 2022b], and DALL-E2 [Ramesh et al. 2022]. To achieve accurate spatial control, some methods, e.g., ControlNet [Zhang et al. 2023] and T2I-Adapter [Mou et al. 2024], propose adding control modules to pre-trained diffusion models. UniControl [Qin et al. 2023; Zhao et al. 2023] proposes to unify different spatial conditions with a joint condition input. In addition to spatial control, some non-spatial conditions are also studied. The IP-Adapter [Ye et al. 2023] utilizes cross-attention to inject an image prompt into the diffusion model to control non-spatial properties, e.g., identity and style. In addition to these representative works, other related attempts [Gal et al. 2022; He et al. 2025; Hua et al. 2023; Kumari et al. 2023; Li et al. 2023; Ma et al. 2024a,b] also help to broaden the scope of controllable image generation.
144
+
145
+ Recent advancements in DiT-based diffusion models further promote the development of controllable image generation. For instance, in-context LoRA [Huang et al. 2024b] and OminiControl [Tan et al. 2024] introduce a novel approach by concatenating all input tokens (i.e., text, image, and conditions) and training LoRA with task-specific datasets for various applications. Subsequently, OmniGen [Xiao et al. 2024a] and UniReal [Chen et al. 2024b] optimize the entire diffusion model in multiple stages on larger-scale training data, achieving improved understanding of input conditions.
146
+
147
+ # 2.3 Cross-attention Routing in Diffusion Models
148
+
149
+ Existing studies (e.g., Prompt-to-Prompt [Hertz et al. 2022]) demonstrate that text-visual cross-attention maps inherently establish spatial-semiotic correspondence between linguistic tokens and visual generation, i.e., the similarity response aligns with the spatial region of the corresponding subject in the generated result. Building upon this observation, UniPortrait [He et al. 2024] constrains the
150
+
151
+ influence region of condition features for identity-specific generation in multi-face scenarios, and AnyStory [He et al. 2025] further extends this approach to subject-driven generation. Some recent works [Yamaguchi and Yanai 2024] show that the cross-attention map in the DiT framework also exhibits spatial properties. In this paper, we explore routing constraints in the DiT framework.
152
+
153
+ # 3 METHOD
154
+
155
+ # 3.1 Preliminaries
156
+
157
+ The Diffusion Transformer (DiT) model [Peebles and Xie 2023] employs a transformer as the denoising network to refine the diffusion latent. Specifically, at the input, the 2D image latent $\mathbf{z}_t \in \mathbb{R}^{c \times w \times h}$ is patchified into a sequence of 1D tokens $\mathbf{z}_t \in \mathbb{R}^{c \times \left(\frac{w}{p} \times \frac{h}{p}\right)}$, where $(w, h)$ is the spatial size, $c$ is the number of channels, and $p$ is the patch size. The image and text tokens are concatenated and processed by the DiT model in a unified manner. Apart from model architectures, more efficient sampling strategies (e.g., Flow Matching [Lipman et al. 2022]) are also proposed. Unlike DDPM [Ho et al. 2020], Flow Matching conducts the forward process by linearly interpolating between noise and data in a straight line. At time step $t$, the latent $\mathbf{z}_t$ is defined as $\mathbf{z}_t = (1 - t)\mathbf{z}_0 + t\epsilon$, where $\mathbf{z}_0$ is the clean image and $\epsilon \sim \mathcal{N}(0, \mathbf{I})$ is Gaussian noise. The model is trained to directly regress the target velocity given the noised latent $\mathbf{z}_t$, timestep $t$, and condition $y$. The objective is to minimize the mean squared error:
158
+
159
+ $$
160
+ L_{\text{diff}} = \mathbb{E}\left[ \left\| (\mathbf{z}_0 - \epsilon) - \mathcal{V}_{\theta}(\mathbf{z}_t, t, y) \right\|^2 \right], \tag{1}
161
+ $$
162
+
163
+ where $\mathcal{V}_{\theta}$ refers to the diffusion model. The DiT framework and Flow Matching are widely used in some recent diffusion models, such as Stable Diffusion3 [Esser et al. 2024] and Flux [flu 2023a].
164
+
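To make Eq. (1) concrete, the following is a minimal PyTorch sketch of one Flow Matching training step under the interpolation convention stated above. `velocity_model` is a stand-in for the DiT denoiser $\mathcal{V}_\theta$; the function signature and names are illustrative assumptions, not the authors' implementation.

```python
import torch

def flow_matching_loss(velocity_model, z0, cond, device="cpu"):
    """One Flow Matching training step for a DiT-style denoiser.

    z0:   clean latents, shape (B, C, H, W)
    cond: conditioning input forwarded to the model (e.g., text embeddings)
    """
    b = z0.shape[0]
    # Sample a timestep t in [0, 1] per sample and Gaussian noise.
    t = torch.rand(b, device=device).view(b, 1, 1, 1)
    eps = torch.randn_like(z0)
    # Linear interpolation between data and noise: z_t = (1 - t) z_0 + t * eps.
    z_t = (1.0 - t) * z0 + t * eps
    # Regress the velocity target (z_0 - eps), following the sign convention of Eq. (1).
    target = z0 - eps
    pred = velocity_model(z_t, t.view(b), cond)
    return torch.mean((target - pred) ** 2)
```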
165
+ # 3.2 Overview
166
+
167
+ An overview of our method is presented in Fig. 2. Specifically, we utilize the Flux-1.0-dev [flu 2023a] as the base model to build a unified framework for image customization, e.g., style, identity, subject appearance, and try-on. Given $n$ condition images $\mathbf{C} = \{\mathbf{C}_1,\dots,\mathbf{C}_n\}$ , we first reuse the VAE [Kingma et al. 2013] of Flux to encode the condition image to the same latent space as noisy latent. Note that the size of the condition image is flexible. Higher resolutions are
168
+
169
+ ![](images/b7ed295f87b9d59e7150dcf77881c1c4305daaeed8f846da0a18fc0b8447e890.jpg)
170
+ Fig. 3. Visualization of cross-attention maps in subject-driven image generation. The first row shows results from a model trained without routing constraints, while the second row presents results from a model trained with routing constraints.
171
+
172
+ recommended for detail-rich images to preserve clarity, while lower resolutions are sufficient for images with fewer details, thereby reducing compression costs. Then, all tokens (i.e., image, text, condition) are concatenated along the sequence dimension and fed into Flux. To enable the model to incorporate the condition input, we introduce a condition mapping layer at the input of Flux. The position embedding (PE) of condition tokens is aligned with that of the noisy latent using Rotary Position Embedding (RoPE) [Su et al. 2024]. Inspired by the non-overlapping position embedding in OminiControl [Tan et al. 2024], we extend these embeddings along the diagonal in a similar fashion. In addition, we introduce a trainable and index-wise condition embedding (CE) $\in \mathbb{R}^{10\times c}$, which is directly added to the condition tokens. Following OminiControl [Tan et al. 2024], we integrate Low-Rank Adaptation (LoRA) [Ryu 2023] modules into Flux as trainable parameters.
173
+
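The sketch below illustrates one plausible way to assemble the condition tokens described above: VAE condition latents are patchified, passed through a condition mapping layer, and given a trainable index-wise condition embedding before being concatenated with the text and noisy-image tokens. All module names, default dimensions, and the patchify details are assumptions for illustration, not the released DreamO code.

```python
import torch
import torch.nn as nn

class ConditionTokenizer(nn.Module):
    """Illustrative sketch: map VAE condition latents into DiT condition tokens."""

    def __init__(self, latent_channels=16, patch=2, dim=3072, max_conds=10):
        super().__init__()
        self.patch = patch
        self.mapping = nn.Linear(latent_channels * patch * patch, dim)      # condition mapping layer
        self.cond_embed = nn.Parameter(torch.zeros(max_conds, dim))         # index-wise CE in R^{10 x c}

    def forward(self, cond_latents):
        """cond_latents: list of (C, H_i, W_i) tensors, one per condition image."""
        tokens, cond_ids = [], []
        for idx, lat in enumerate(cond_latents):
            c, h, w = lat.shape
            p = self.patch
            # Patchify into (h/p * w/p) tokens of size c*p*p.
            t = lat.reshape(c, h // p, p, w // p, p).permute(1, 3, 0, 2, 4)
            t = t.reshape((h // p) * (w // p), c * p * p)
            # Project and add the condition embedding of the idx-th condition image.
            t = self.mapping(t) + self.cond_embed[idx]
            tokens.append(t)
            cond_ids.append(torch.full((t.shape[0],), idx, dtype=torch.long))
        return torch.cat(tokens, dim=0), torch.cat(cond_ids, dim=0)

# The resulting condition tokens would be concatenated with text and noisy-image tokens
# along the sequence dimension; RoPE indices of condition tokens are offset along the
# diagonal so they do not overlap with the image token grid.
```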
174
+ # 3.3 Routing Constraint
175
+
176
+ Inspired by UniPortrait [He et al. 2024] and AnyStory [He et al. 2025], in this paper, we design a routing constraint in the DiT framework for general image customization tasks. As illustrated in Fig. 2, within the condition-guided framework, cross-attention exists between condition images and the generation result:
177
+
178
+ $$
179
+ \mathbf{M} = \frac{\mathbf{Q}_{\text{cond},i} \mathbf{K}_{\text{img}}^{T}}{\sqrt{d}}, \tag{2}
180
+ $$
181
+
182
+ where $\mathbf{Q}_{cond,i} \in \mathbb{R}^{l_{cond,i} \times c}$ refers to the condition tokens of the $i$ -th condition image. $\mathbf{K}_{img} \in \mathbb{R}^{l \times c}$ is the tokens of noisy image latent. The cross-attention map $\mathbf{M} \in \mathbb{R}^{l_{cond,i} \times l}$ is a dense similarity between the $i$ -th condition image and the generation result. To obtain the global response of the condition image in different locations of the generated output, we average the dense similarity matrix along the $l_{cond,i}$ dimension, resulting in a response map $\mathbf{M} \in \mathbb{R}^l$ , representing the global similarity of the condition image on the generated result. To constrain the image-to-image attention focus on the specific subject, MSE loss is employed to optimize the attention within DiT across condition images and the generation result:
183
+
184
+ $$
185
+ L_{\text{route}} = \frac{1}{n_c \times n_l} \sum_{j=0}^{n_l - 1} \sum_{i=0}^{n_c - 1} \left\| \mathbf{M}_{i}^{j} - \mathbf{M}_{\text{target},i} \right\|_2^2, \tag{3}
186
+ $$
187
+
188
+ where $i$ and $j$ refer to the condition index and layer index. $n_c$ and $n_l$ are the number of conditions and the number of layers, respectively. $\mathbf{M}_{\text{target}}$ refers to the subject mask for the target image. As shown in
189
+
190
+ ![](images/e80fc8ebdec074a32bc24d46b6182ac8316245a3fa6446c13d883e513c9121fc.jpg)
191
+ Fig. 4. The progressive training pipeline of our method. Left column shows the three training stages of our method. Right column shows the generation capability after the training of each stage.
192
+
193
+ the second row of Fig. 3, after training with routing constraint, the attention of the condition image clearly focuses on the target subject, and the result shows improved consistency with the reference image in terms of details. In addition to improved consistency, this strategy also helps decoupling in multi-reference cases.
194
+
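A minimal single-layer sketch of the image-to-image routing constraint (Eqs. 2-3) is given below; summation over layers happens outside this function. The sigmoid normalization of the averaged response is our own choice, since the text does not specify how the response map is normalized before the MSE against the subject mask.

```python
import torch
import torch.nn.functional as F

def routing_loss(q_cond_list, k_img, masks):
    """Image-to-image routing constraint, single-layer sketch.

    q_cond_list: list of n_c tensors (l_cond_i, d) -- queries of each condition image
    k_img:       tensor (l_img, d)                 -- keys of the noisy image tokens
    masks:       tensor (n_c, l_img) in {0, 1}     -- subject masks flattened to the token grid
    """
    d = k_img.shape[-1]
    loss = 0.0
    for i, q in enumerate(q_cond_list):
        attn = q @ k_img.t() / d ** 0.5      # Eq. (2): (l_cond_i, l_img) similarity
        response = attn.mean(dim=0)          # average over condition tokens -> (l_img,)
        response = torch.sigmoid(response)   # squash to [0, 1]; normalization choice is ours, not the paper's
        loss = loss + F.mse_loss(response, masks[i].float())
    return loss / len(q_cond_list)
```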
195
+ In addition to the image-to-image routing constraint, we also design a placeholder-to-image routing constraint to establish correspondences between textual descriptions and condition inputs. Specifically, for the $i$-th condition, we append a placeholder [ref#i] after the corresponding instance name, e.g., "A woman from [ref#1] and a woman from [ref#2] are walking in the park". During the training for multi-condition tasks, we calculate the similarity between the conditional image tokens and the placeholder tokens. The routing constraint ensures that the similarity between $\mathbf{C}_i$ and [ref#i] is 1, while it is 0 for all other pairs:
196
+
197
+ $$
198
+ L_{\text{holder}} = \frac{1}{n_c} \sum_{i=0}^{n_c - 1} \left\| \mathrm{Softmax}\left(\mathbf{Q}_{\text{cond},i} \mathbf{K}_{\text{text},i}^{T}\right) - \mathbf{B}_{i} \right\|_2^2, \tag{4}
199
+ $$
200
+
201
+ where $\mathbf{K}_{text,i}$ refers to the text feature of [ref#i]. $\mathbf{B}_i$ is a binary matrix, where the value is 1 when the placeholder matches the condition image, and 0 otherwise.
202
+
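Below is a hedged sketch of the placeholder-to-image routing constraint (Eq. 4). How the binary target $\mathbf{B}_i$ distributes attention mass over the tokens of the matching placeholder is not specified in the text; the uniform target used here is an assumption.

```python
import torch
import torch.nn.functional as F

def placeholder_loss(q_cond_list, k_text_list):
    """Placeholder-to-image routing constraint, minimal sketch.

    q_cond_list: list of n_c tensors (l_cond_i, d) -- condition-image queries
    k_text_list: list of n_c tensors (l_ph_i, d)   -- text keys of each [ref#i] placeholder
    """
    n_c = len(q_cond_list)
    k_all = torch.cat(k_text_list, dim=0)               # keys of all placeholders, (sum l_ph, d)
    sizes = [k.shape[0] for k in k_text_list]
    starts = [sum(sizes[:i]) for i in range(n_c)]        # column ranges of each placeholder
    loss = 0.0
    for i, q in enumerate(q_cond_list):
        sim = torch.softmax(q @ k_all.t(), dim=-1)       # (l_cond_i, sum l_ph)
        target = torch.zeros_like(sim)
        # B_i: attention mass only on the columns of the matching [ref#i] placeholder
        # (spread uniformly over its tokens; this split is an assumption).
        target[:, starts[i]:starts[i] + sizes[i]] = 1.0 / sizes[i]
        loss = loss + F.mse_loss(sim, target)
    return loss / n_c
```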
203
+ The final loss function of our method is defined as:
204
+
205
+ $$
206
+ L = \lambda_{\text{diff}} \cdot L_{\text{diff}} + \lambda_{\text{route}} \cdot L_{\text{route}} + \lambda_{\text{holder}} \cdot L_{\text{holder}}, \tag{5}
207
+ $$
208
+
209
+ where $\lambda_{diff}$ , $\lambda_{route}$ and $\lambda_{holder}$ are loss weights. To allow the model to handle regular text input, we introduce normal text without placeholders with a probability of $50\%$ and discard $L_{holder}$ accordingly. Note that $L_{route}$ and $L_{holder}$ do not incur significant additional computational overhead during training (2.5s/iter vs. 3s/iter).
210
+
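A small sketch of how the combined objective (Eq. 5) and the 50% placeholder dropout could be wired together; the loss weights shown are placeholders, since the paper does not report the values it uses.

```python
import random

def total_loss(l_diff, l_route, l_holder, use_placeholder,
               w_diff=1.0, w_route=0.1, w_holder=0.1):
    """Combine the objectives of Eq. (5). Weight values here are illustrative."""
    loss = w_diff * l_diff + w_route * l_route
    if use_placeholder:  # prompts without [ref#i] placeholders skip L_holder
        loss = loss + w_holder * l_holder
    return loss

# During batch construction, prompts keep their placeholders only half of the time:
# use_placeholder = random.random() < 0.5
```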
211
+ # 3.4 Training Data Construction
212
+
213
+ To achieve generalized image customization, we collect training data, covering a wide range of tasks.
214
+
215
+ Identity paired data. Since high-quality identity paired data [Li et al. 2024] is difficult to collect from the Internet, we adopt the open-source ID customization method PuLID [Guo et al. 2024] for dataset construction. Specifically, we generate two images of the
216
+
217
+ ![](images/19a5b372324eb3eb330e76b773d7bde6ca02dc4952249c8a77ceb8c6829803a2.jpg)
218
+
219
+ ![](images/512df60b48f29d339264abecf08830607357111e99bc45ac5d7dd6f4c81337d7.jpg)
220
+
221
+ ![](images/aeb2b636c720541aaa9b57d12f25842ba7046487006e746bac70140cf3860a0e.jpg)
222
+
223
+ ![](images/b3f856dddb811ccd38bba81c312339a9127f5f73f20ce41272e38f90ef08355e.jpg)
224
+ Fig. 5. Visual comparison between our DreamO and other methods.
225
+
226
+ same identity using PuLID-FLUX, which then serve as mutual references. We also provide PuLID-SDXL with a reference face image and a text prompt describing the desired style to produce stylized training pairs. Finally, we collect 150K photorealistic data and 60K stylized identity data.
227
+
228
+ Subject-driven data. For single-subject-driven image customization, we utilize the Subject200K [Tan et al. 2024] dataset as part of
229
+
230
+ training data. To rectify the absence of character-related conditions, we collect 100K paired character-related data through retrieval. For multi-subject-driven image customization, we construct some two-column images through concatenation on the Subject200K dataset. In addition, we employ X2I-subject [Xiao et al. 2024a] dataset in multi-subject-driven training. To improve human-driven generation, we develop a pipeline similar to MovieGen [Polyak et al. 2025].
231
+
232
+ Starting with a long-video dataset, we apply content-aware scene detection to extract short clips. Mask2Former [Cheng et al. 2022] is used to generate human masks for key frames and perform object tracking. For cross-clip instance matching, we use SigLip [Zhai et al. 2023] embeddings and clustering.
233
+
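The cross-clip instance matching step could look roughly like the sketch below: embeddings of person crops are grouped by agglomerative clustering on cosine distance. `embed_fn` stands in for a SigLIP image encoder returning one embedding per crop, and the distance threshold is illustrative rather than a value from the paper.

```python
import numpy as np
from sklearn.cluster import AgglomerativeClustering

def match_instances_across_clips(crops, embed_fn, distance_threshold=0.3):
    """Group person crops from different clips into identities (illustrative sketch)."""
    emb = embed_fn(crops)                                     # (N, D) image embeddings
    emb = emb / np.linalg.norm(emb, axis=1, keepdims=True)    # cosine-normalize
    clustering = AgglomerativeClustering(
        n_clusters=None,
        distance_threshold=distance_threshold,
        metric="cosine",
        linkage="average",
    )
    labels = clustering.fit_predict(emb)                      # same label -> same instance
    return labels
```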
234
+ Try-on data. We create a paired try-on dataset from two sources. One part consists of paired model and clothing images collected directly from the Web. For the other, we first crawl high-quality model images as ground truth, then use image segmentation [Jin 2023; Jin et al. 2024] to extract clothing items and generate the corresponding pairs. All images are manually filtered to remove low-quality samples, resulting in a dataset of 500K try-on pairs.
235
+
236
+ Style-driven data. This paper tackles two style transfer tasks: (1) style reference control with text descriptions of content, and (2) style reference with content reference images. For the first task, we use an internal style customization model based on SDXL to generate images with the same style but different content from two distinct prompts. For the second task, training requires style reference images, content reference images, and target images, where the target image must match both the style and content structure of the reference images. Based on the type 1 dataset, we produce the content reference for each style image using Canny-guided Flux [flu 2023c]. More details are provided in the Appendix.
237
+
238
+ Routing mask extraction. To obtain the labels for the routing constraint (i.e., Eq. 3), we use LISA [Lai et al. 2024] to extract masks of objects conditioned on the text descriptions. For certain complex datasets, we employ InternVL [Chen et al. 2024a] to generate descriptions of the target object. More details are provided in the Appendix.
239
+
240
+ Although the training data is constructed separately for different tasks, we observe the emergence of some cross-task capabilities. For instance, the model can customize the combination of ID and Try-on (as shown in Fig. 12), which does not exist in the training data.
241
+
242
+ # 3.5 Progressive Training Process
243
+
244
+ In experiments, we find that training directly on all data makes convergence difficult. This is mainly due to the limited capacity of LoRA [Ryu 2023] optimization, making it difficult for the model to capture task-specific capabilities under complex data distributions. In addition to convergence, the quality of the output after training deviates from the original prior of Flux [flu 2023a]. This divergence is caused by the impact of some low-quality training samples.
245
+
246
+ To address these issues, we design a progressive training strategy that allows the model to smoothly converge across different tasks while mitigating the influence of training data on the generation prior of Flux. The training pipeline is shown in Fig. 4. Specifically, we first optimize the model on subject-driven training data to initiate the model with a consistency-preserving capability. Note that the Subject200K [Tan et al. 2024] training data is generated by the base model (i.e., Flux), thus shares a similar distribution with the model generation space, which facilitates fast convergence. Since the X2I-subject [Xiao et al. 2024a] dataset is synthetically generated by MS-Diffusion [Wang et al. 2024b], a lot of training samples contain undesired artifacts and distortions. Therefore, during this warm-up stage, the two-column Subject200K images described in
247
+
248
+ Sec. 3.4 are also utilized as part of the training data to facilitate rapid convergence of the multi-reference control. The right part of Fig. 4 illustrates that after the first training stage, the model acquires an initial subject-driven generation capability and presents strong text-following performance. In the second training stage, we incorporate all the training data and perform full-data tuning. This allows the model to further converge on all subtasks defined in this work.
249
+
250
+ After the second stage of full-data training, we observe that the generation quality is heavily influenced by the training data, particularly by low-quality training samples. To realign the generation quality with the generative prior of Flux, we design an image quality refinement training stage. Specifically, we utilize Flux to generate around $40K$ training samples. During training, we use the original images as references to guide the model in reconstructing them. To prevent copy-paste effects, we drop $95\%$ of the reference-image tokens. After a short optimization, the generation quality improves significantly, achieving alignment with the generation prior of Flux.
251
+
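A minimal sketch of the reference-token dropping used in this quality-refinement stage; the paper only states that 95% of the reference tokens are dropped, so the uniform random sampling shown here is an assumption.

```python
import torch

def drop_reference_tokens(ref_tokens, keep_ratio=0.05):
    """Keep ~5% of the reference-image tokens to prevent copy-paste reconstruction.

    ref_tokens: (L, D) tokens of the reference image.
    Returns the kept tokens and their indices.
    """
    num_tokens = ref_tokens.shape[0]
    n_keep = max(1, int(num_tokens * keep_ratio))
    idx = torch.randperm(num_tokens)[:n_keep]   # uniform random subset of token indices
    return ref_tokens[idx], idx
```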
252
+ # 4 EXPERIMENT
253
+
254
+ # 4.1 Implementation Details
255
+
256
+ In this paper, we adopt Flux-1.0-dev as the base model. The rank of LoRA [Ryu 2023] is set as 128, resulting in a total parameter increase of 478M. During training, we employ the Adam [Loshchilov and Hutter 2017] optimizer with a learning rate of 4e-5 and train on 8 NVIDIA A100 80G GPUs. The batch size is set as 8. The first training stage consists of 20K iterations, followed by 90K iterations in the second stage, and finally 3K iterations in the last training stage. In inference, we use Flux-Turbo [flu 2023b] for acceleration, enabling the generation of $1024 \times 1024$ results within 10s. Unless specified, all results in this paper are based on the Turbo model. Some of the example inputs are processed using BEN2 [Meyer and Spruyt 2025] to remove background.
257
+
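For reference, the hyperparameters reported in this subsection collected into a single configuration sketch; the field names are ours, only the values come from the text.

```python
from dataclasses import dataclass

@dataclass
class DreamOTrainingConfig:
    """Hyperparameters as reported in Sec. 4.1 (field names are illustrative)."""
    base_model: str = "FLUX.1-dev"
    lora_rank: int = 128          # ~478M additional trainable parameters
    optimizer: str = "Adam"
    learning_rate: float = 4e-5
    num_gpus: int = 8             # NVIDIA A100 80G
    batch_size: int = 8
    stage1_iters: int = 20_000    # subject-driven warm-up
    stage2_iters: int = 90_000    # full-data tuning
    stage3_iters: int = 3_000     # image quality refinement
```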
258
+ # 4.2 Qualitative Comparison
259
+
260
+ To validate the performance of DreamO, we conduct comparisons with recent state-of-the-art methods across multiple subtasks. The visual comparison is shown in Fig. 5. The first part presents a comparison between DreamO and SOTA identity-customization methods i.e., PhotoMaker [Li et al. 2024], InstantID [Wang et al. 2024a], PuLID [Guo et al. 2024]. The results demonstrate that DreamO can inject identity information with high fidelity across various scenes, while offering impressive flexibility for customization.
261
+
262
+ The second part compares DreamO with recent subject-customization methods, including single-task frameworks (i.e., MS-Diffusion [Wang et al. 2024b]) and unified generation frameworks (i.e., OmniGen [Xiao et al. 2024a], OminiControl [Tan et al. 2024]). The results demonstrate that DreamO achieves higher subject fidelity and better text consistency in both single-subject and multi-subject scenarios.
263
+
264
+ The third part shows comparison in virtual try-on, indicating that DreamO can effectively place clothing in scenes that align with the provided text with high fidelity to the reference images. Unlike methods like IMAGDressing, which produce high-fidelity clothing but lose text alignment, DreamO maintains both.
265
+
266
+ The last part presents the comparison between DreamO and recent style-customization methods, i.e., StyleShot [Gao et al. 2024],
267
+
268
+ Table 1. Quantitative evaluation of subject-driven customization.
269
+
270
+ <table><tr><td rowspan="2"></td><td colspan="4">Single-subject Customization</td><td colspan="3">Multi-subject Customization</td></tr><tr><td>MS-Diffusion</td><td>OmniGen</td><td>OminiControl</td><td>DreamO</td><td>MS-Diffusion</td><td>OmniGen</td><td>DreamO</td></tr><tr><td>CLIP-sim ↑</td><td>0.8989</td><td>0.8824</td><td>0.8220</td><td>0.9150</td><td>0.7686</td><td>0.7605</td><td>0.7775</td></tr><tr><td>DINO-sim ↑</td><td>0.7746</td><td>0.7582</td><td>0.6089</td><td>0.8056</td><td>0.6113</td><td>0.5646</td><td>0.6253</td></tr><tr><td>Text-sim ↑</td><td>31.78</td><td>31.74</td><td>31.12</td><td>31.92</td><td>31.34</td><td>29.55</td><td>31.46</td></tr></table>
271
+
272
+ Table 2. Quantitative evaluation of text-driven style transfer. The Text-sim↑ is computed by the Cosine similarity between CLIP [Radford et al. 2021] image embedding and CLIP text embedding.
273
+
274
+ <table><tr><td></td><td>StyleAlign</td><td>StyleShot</td><td>InstantStyle</td><td>DEADiff</td><td>CSGO</td><td>DreamO</td></tr><tr><td>Style-sim ↑</td><td>0.7122</td><td>0.6922</td><td>0.6988</td><td>0.7269</td><td>0.7296</td><td>0.7340</td></tr><tr><td>Text-sim ↑</td><td>0.2566</td><td>0.2693</td><td>0.2721</td><td>0.2656</td><td>0.2701</td><td>0.2750</td></tr></table>
275
+
276
+ Table 3. Quantitative evaluation of identity-driven customization.
277
+
278
+ <table><tr><td></td><td>PhotoMaker</td><td>InstantID</td><td>PuLID</td><td>DreamO</td></tr><tr><td>Face-sim ↑</td><td>0.212</td><td>0.590</td><td>0.5829</td><td>0.607</td></tr><tr><td>Text-sim ↑</td><td>0.2520</td><td>0.2294</td><td>0.2534</td><td>0.2570</td></tr></table>
279
+
280
+ Table 4. Quantitative evaluation of try-on.
281
+
282
+ <table><tr><td></td><td>MagCloth</td><td>IMAGDressing</td><td>OmniGen</td><td>OminiControl</td><td>DreamO</td></tr><tr><td>CLIP-sim ↑</td><td>0.5977</td><td>0.8405</td><td>0.7265</td><td>0.7065</td><td>0.7613</td></tr><tr><td>Text-sim ↑</td><td>30.17</td><td>17.74</td><td>27.83</td><td>28.79</td><td>30.47</td></tr></table>
283
+
284
+ StyleAlign [Wu et al. 2021], InstantStyle [Wang et al. 2024c], DeaDiff [Qi et al. 2024], and CSGO [Xing et al. 2024]. It can be observed that DreamO has weaker content intrusion, better text alignment, and higher style fidelity in the generated results.
285
+
286
+ # 4.3 Quantitative Comparison
287
+
288
+ In addition to qualitative comparison, we conduct quantitative comparisons for each task. We present the comparison of identity customization in Tab. 3, which is evaluated on Unsplash-50 [Gal et al. 2024]. Here, we provide 9 prompts for each face. Following PuLID [Guo et al. 2024], the Face-Sim represents the ID cosine similarity, with ID embeddings extracted by CurricularFace [Huang et al. 2020]. We also compute the CLIP cosine similarity between the generated result and the prompt to measure the text-following ability of different methods. As can be seen, our DreamO shows better face similarity and text-based customization ability.
289
+
290
+ Tab. 1 presents the quantitative comparison in single- and multi-subject customization. We use DreamBench [Ruiz et al. 2023] as the testset of single-subject customization. For multi-subject customization, we randomly select 20 pairs from DreamBench and provide 25 prompts for each. During testing, we generate four images with different seeds for each test sample. Here, we calculate the CLIP cosine similarity and Dino [Caron et al. 2021] cosine similarity between the generated result and the reference images as a measure of subject consistency. To improve accuracy, we remove the background of the generated results and then calculate similarity. Additionally, we
291
+
292
+ Table 5. Ablation study of different model settings in multi-subject-driven customization.
293
+
294
+ <table><tr><td></td><td>w/o CE</td><td>w/o RC</td><td>w/o PT</td><td>DreamO</td></tr><tr><td>CLIP-sim ↑</td><td>0.7697</td><td>0.7448</td><td>0.7349</td><td>0.7775</td></tr><tr><td>DINO-sim ↑</td><td>0.6097</td><td>0.5540</td><td>0.5381</td><td>0.6253</td></tr><tr><td>Text-sim ↑</td><td>31.26</td><td>28.42</td><td>28.31</td><td>31.46</td></tr></table>
295
+
296
+ employ CLIP cosine similarity between the text description and the generated result as a measure of content alignment. Tab. 1 shows that our method outperforms others in subject consistency while demonstrating strong text-following ability.
297
+
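A sketch of the reference-consistency metric used above: cosine similarity between embeddings of the generated and reference images from a frozen encoder (CLIP or DINO), averaged over the test set. The encoder call itself is omitted; this is an illustration, not the authors' evaluation script.

```python
import torch
import torch.nn.functional as F

def subject_similarity(gen_embeds, ref_embeds):
    """Mean cosine similarity between generated and reference image embeddings.

    gen_embeds / ref_embeds: (N, D) features from a frozen encoder (e.g., CLIP or DINO),
    computed after background removal of the generated results.
    """
    gen = F.normalize(gen_embeds, dim=-1)
    ref = F.normalize(ref_embeds, dim=-1)
    return (gen * ref).sum(dim=-1).mean().item()
```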
298
+ The quantitative comparison of the try-on is presented in Tab. 4. We select 300 reference garments from VITON-HD [Choi et al. 2021] encompassing various styles and colors as the test data. During testing, we provide 10 prompts for each cloth. The CLIP cosine similarity between the generated result and reference cloth is employed to measure the try-on accuracy. Here, we only crop the cloth from the result to compute the CLIP-sim. The CLIP cosine similarity between the generated results and the prompt is employed to measure the text-following ability of different methods. The result in Tab. 4 shows the attractive performance of our DreamO. Although IMAGDressing [Shen et al. 2025] has higher CLIP-sim, it can only generate images with a white background (i.e., Fig. 5) with little text-following ability.
299
+
300
+ For style customization, we construct an evaluation dataset containing 249 style images and 24 prompts. Each method generates $249 \times 24$ style customization results. We use the pre-trained CSD [Somepalli et al. 2024] to extract the style features of the generated results and reference images, and calculate the cosine similarity between them as a measure of style consistency. Furthermore, we compute the CLIP [Radford et al. 2021] similarity between stylized results and text descriptions as the metric of content consistency. The results are shown in Tab. 2, which demonstrate that our method has better performance in style consistency and content consistency.
301
+
302
+ # 4.4 User Study
303
+
304
+ In addition to the automatic evaluation metrics, we also conduct a user study for manual evaluation of different methods. Specifically, for each task (i.e., style, object, identity, and try-on customization), we assigned 6 test samples and invited 20 volunteers to rate on three aspects: text alignment, reference alignment, and image quality. The scoring range was set from 0 to 5, where a higher score indicates greater satisfaction. Fig. 9 shows that our DreamO achieves better performance in these three evaluation aspects.
305
+
306
+ ![](images/3b186c98eb21f754d119318ae4505a43cf224efab1432d1b2f309745e0bd26d0.jpg)
307
+
308
+ Ablation panels (placeholder-to-image routing constraint): "w/o Placeholder Loss" vs. "w Placeholder Loss" for the prompt "Two women walk in the park. The woman on the left is from [ref#1]. The woman on the right is from [ref#2]."
309
+
310
+ Fig. 7. The ablation study of routing constraint in our proposed DreamO. Panels compare "w/o routing constraint" and "w routing constraint" results, e.g., for the prompt "A woman walks in the park".
311
+
312
+ Fig. 8. The ablation study of progressive training in our proposed DreamO. Panels compare "w/o progressive training", "w/o quality tuning", and "Full Implementation" results, e.g., for the prompts "A boy hold a box, and he is laughing. In his side, another Thermos Cup on the table." and "In front of the grocery store, the girl is wearing a green dress and a polka dot headscarf".
346
+
347
+ # 4.5 Ablation Study
348
+
349
+ Routing constraint. In this paper, we introduce a routing constraint into DiT training to enhance generation fidelity and facilitate the decoupling of multi-condition control. To evaluate its effectiveness, we ablate the routing constraint during training, with results shown in Fig. 7. In single-condition generation, its removal leads to degraded reference fidelity, e.g., the clothing color becomes inconsistent with the reference. In multi-condition settings, it causes condition coupling, e.g., features of the two toys are crossed. These results confirm that the routing constraint improves the fidelity and disentanglement of different conditions.
350
+
351
+ Progressive training. To enable the model to better converge on all sub-tasks under complex data distributions and to rectify the impact of training data distribution on generation quality, we design a progressive training strategy. The effectiveness of this strategy is demonstrated in Fig. 8. One can see that directly training the model on all datasets leads to suboptimal convergence, particularly in complex tasks such as multi-subject consistency. Warming up on a smaller and easier-to-learn dataset (e.g., Subject200K [Tan et al. 2024]) before joint training improves convergence, but the generation quality is easily influenced by the training data distribution, deviating from the generation priors of Flux. By introducing an image quality tuning stage, the model can produce higher-quality generation results.
352
+
353
+ ![](images/8869e13e02c742ca66cab98f4bbd2b65a66bc17b61193d446b30ba1e56f3649f.jpg)
354
+ Fig. 6. The ablation study of the placeholder-to-image routing constraint.
355
+
356
+ ![](images/59708c463d8b169038e1962f79cb519893304ca0a2b11d0b59026a5bfe791ce1.jpg)
357
+
358
+ ![](images/8f9bef8c1c0048754e93ca438422aa05e02fc97d2186ec338f23f0c557d87e83.jpg)
359
+ Fig. 9. The user study of different methods.
360
+
361
+ ![](images/6759b15ae1941017d09b2e234f99ae239f6ad90c61b12df9930421dd0689ad11.jpg)
362
+
363
+ **Placeholder-to-image routing constraint.** As demonstrated in Eq. 4, this paper designs a placeholder-to-image routing constraint to build the routing relationship between placeholders and specific images. Fig. 6 shows the effect of this loss term. It can be seen that without this loss, placeholders struggle to precisely control their corresponding images. After applying this loss, placeholders can bind to specific reference objects, enabling individual control over particular objects during multi-subject customization.
364
+
365
+ Quantitative results. In addition to the visual comparison, we show the quantitative results of the ablation study, as shown in Tab. 5. The experiment is conducted on multi-subject-driven customization. It can be seen that not using the routing constraint (RC) and progressive training strategy (PT) significantly impacts performance, leading to a decrease in the reference consistency and text following. We also study the role of condition embedding (CE), and its absence results in a decline in the reference consistency.
366
+
367
+ # 5 CONCLUSION
368
+
369
+ In this study, we introduce DreamO, a unified framework designed for generalized image customization across diverse condition types (e.g., identity, style, subject, and try-on) within a single pre-trained DiT architecture. To facilitate this, we construct a large-scale training dataset. By embedding all condition types into the DiT input sequence and incorporating a feature routing constraint, DreamO achieves high-fidelity consistency while effectively disentangling heterogeneous control signals. In addition, we design a progressive training strategy that enables the model to incrementally acquire diverse control capabilities under complex data distributions, while
370
+
371
+ maintaining the image quality inherent to the base model. Comprehensive experiments demonstrate that DreamO excels in performing a wide range of image customization tasks with high-quality results.
372
+
373
+ # REFERENCES
374
+
375
+ 2023a. https://github.com/black-forest-labs/flux?tab=README-ov-file
376
+ 2023b. https://huggingface.co/alamama-creative/FLUX.1-Turbo-Alpha
377
+ 2023c. https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev
378
+ Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. 2024. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219 (2024).
379
+ Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. 2021. Emerging Properties in Self-Supervised Vision Transformers. In Proceedings of the International Conference on Computer Vision (ICCV).
380
+ Xi Chen, Zhifei Zhang, He Zhang, Yuqian Zhou, Soo Ye Kim, Qing Liu, Yijun Li, Jianming Zhang, Nanxuan Zhao, Yilin Wang, et al. 2024b. UniReal: Universal Image Generation and Editing via Learning Real-world Dynamics. arXiv preprint arXiv:2412.07774 (2024).
381
+ Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. 2024a. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271 (2024).
382
+ Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. 2022. Masked-attention mask transformer for universal image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 1290-1299.
383
+ Seunghwan Choi, Sunghyun Park, Minsoo Lee, and Jaegul Choo. 2021. Viton-hd: High-resolution virtual try-on via misalignment-aware normalization. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 14131-14140.
384
+ Yisol Choi, Sangkyung Kwak, Kyungmin Lee, Hyungwon Choi, and Jinwoo Shin. 2024. Improving diffusion models for authentic virtual try-on in the wild. In European Conference on Computer Vision. Springer, 206-235.
385
+ Prafulla Dhariwal and Alexander Nichol. 2021. Diffusion models beat gans on image synthesis. Advances in neural information processing systems 34 (2021), 8780-8794.
386
+ Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. 2024. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first international conference on machine learning*.
387
+ Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. 2022. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618 (2022).
388
+ Rinon Gal, Or Lichter, Elad Richardson, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. 2024. Lcm-lookahead for encoder-based text-to-image personalization. In European Conference on Computer Vision. Springer, 322-340.
389
+ Junyao Gao, Yanchen Liu, Yanan Sun, Yinhao Tang, Yanhong Zeng, Kai Chen, and Cairong Zhao. 2024. Styleshot: A snapshot on any style. arXiv preprint arXiv:2407.01414 (2024).
390
+ Zinan Guo, Yanze Wu, Chen Zhuowei, Peng Zhang, Qian He, et al. 2024. Pulid: Pure and lightning data customization via contrastive alignment. Advances in Neural Information Processing Systems 37 (2024), 36777-36804.
391
+ Junjie He, Yifeng Geng, and Liefeng Bo. 2024. UniPortrait: A Unified Framework for Identity-Preserving Single-and Multi-Human Image Personalization. arXiv preprint arXiv:2408.05939 (2024).
392
+ Junjie He, Yuxiang Tuo, Binghui Chen, Chongyang Zhong, Yifeng Geng, and Liefeng Bo. 2025. AnyStory: Towards Unified Single and Multiple Subject Personalization in Text-to-Image Generation. arXiv preprint arXiv:2501.09503 (2025).
393
+ Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. 2022. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626 (2022).
394
+ Jonathan Ho, Ajay Jain, and Pieter Abbeel. 2020. Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems 33 (2020), 6840-6851.
395
+ Miao Hua, Jiawei Liu, Fei Ding, Wei Liu, Jie Wu, and Qian He. 2023. Dreamtuner: Single image is enough for subject-driven generation. arXiv preprint arXiv:2312.13691 (2023).
396
+ Lianghua Huang, Di Chen, Yu Liu, Yujun Shen, Deli Zhao, and Jingren Zhou. 2023. Composer: Creative and controllable image synthesis with composable conditions. arXiv preprint arXiv:2302.09778 (2023).
397
+ Lianghua Huang, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Huanzhang Dou, Chen Liang, Yutong Feng, Yu Liu, and Jingren Zhou. 2024b. In-context lora for diffusion transformers. arXiv preprint arXiv:2410.23775 (2024).
398
+
399
+ Mengqi Huang, Zhendong Mao, Mingcong Liu, Qian He, and Yongdong Zhang. 2024a. RealCustom: narrowing real text word for real-time open-domain text-to-image customization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 7476-7485.
400
+ Yuge Huang, Yuhan Wang, Ying Tai, Xiaoming Liu, Pengcheng Shen, Shaoxin Li, Jilin Li, and Feiyue Huang. 2020. Curricularface: adaptive curriculum learning loss for deep face recognition. In proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 5901-5910.
401
+ Zhenchao Jin. 2023. Sssegmentation: An open source supervised semantic segmentation toolbox based on pytorch. arXiv preprint arXiv:2305.17091 (2023).
402
+ Zhenchao Jin, Xiaowei Hu, Lingting Zhu, Luchuan Song, Li Yuan, and Lequan Yu. 2024. IDRNet: Intervention-driven relation network for semantic segmentation. Advances in Neural Information Processing Systems 36 (2024).
403
+ Diederik P Kingma and Max Welling. 2013. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114 (2013).
404
+ Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. 2023. Multi-concept customization of text-to-image diffusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 1931-1941.
405
+ Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. 2024. Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 9579-9589.
406
+ Dongxu Li, Junnan Li, and Steven Hoi. 2023. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. Advances in Neural Information Processing Systems 36 (2023), 30146-30166.
407
+ Zhen Li, Mingdeng Cao, Xintao Wang, Zhongang Qi, Ming-Ming Cheng, and Ying Shan. 2024. Photomaker: Customizing realistic human photos via stacked id embedding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 8640-8650.
408
+ Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. 2022. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747 (2022).
409
+ Ilya Loshchilov and Frank Hutter. 2017. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017).
410
+ Junsheng Luan, Guangyuan Li, Lei Zhao, and Wei Xing. 2025. MC-VTON: Minimal Control Virtual Try-On Diffusion Transformer. arXiv preprint arXiv:2501.03630 (2025).
411
+ Jian Ma, Junhao Liang, Chen Chen, and Haoran Lu. 2024a. Subject-diffusion: Open domain personalized text-to-image generation without test-time fine-tuning. In ACM SIGGRAPH 2024 Conference Papers. 1-12.
412
+ Yuhang Ma, Wenting Xu, Jiji Tang, Qinfeng Jin, Rongsheng Zhang, Zeng Zhao, Changjie Fan, and Zhipeng Hu. 2024b. Character-Adapter: Prompt-Guided Region Control for High-Fidelity Character Customization. arXiv preprint arXiv:2406.16537 (2024).
413
+ Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, and Stefano Ermon. 2021. Sdedit: Guided image synthesis and editing with stochastic differential equations. arXiv preprint arXiv:2108.01073 (2021).
414
+ Maxwell Meyer and Jack Spruyt. 2025. BEN: Using Confidence-Guided Matting for Dichotomous Image Segmentation. arXiv preprint arXiv:2501.06230 (2025).
415
+ Chong Mou, Xintao Wang, Jiechong Song, Ying Shan, and Jian Zhang. 2023. Dragondiffusion: Enabling drag-style manipulation on diffusion models. arXiv preprint arXiv:2307.02421 (2023).
416
+ Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. 2024. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence. 4296-4304.
417
+ Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. 2022. GLIDE: Towards Photorealistic Image Generation and Editing with Text-Guided Diffusion Models. In International Conference on Machine Learning. PMLR, 16784-16804.
418
+ William Peebles and Saining Xie. 2023. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision. 4195-4205.
419
+ Adam Polyak, Amit Zohar, Andrew Brown, Andros Tjandra, Animesh Sinha, Ann Lee, Apoorv Vyas, Bowen Shi, Chih-Yao Ma, Ching-Yao Chuang, David Yan, Dhruv Choudhary, Dingkang Wang, Geet Sethi, Guan Pang, Haoyu Ma, Ishan Misra, Ji Hou, Jialiang Wang, Kiran Jagadeesh, Kunpeng Li, Luxin Zhang, Mannat Singh, Mary Williamson, Matt Le, Matthew Yu, Mitesh Kumar Singh, Peizhao Zhang, Peter Vajda, Quentin Duval, Rohit Girdhar, Roshan Sumbaly, Sai Saketh Rambhatla, Sam Tsai, Samaneh Azadi, Samyak Datta, Sanyuan Chen, Sean Bell, Sharadh Ramaswamy, Shelly Sheynin, Siddharth Bhattacharya, Simran Motwani, Tao Xu, Tianhe Li, Tingbo Hou, Wei-Ning Hsu, Xi Yin, Xiaoliang Dai, Yaniv Taigman, Yaqiao Luo, Yen-Cheng Liu, Yi-Chiao Wu, Yue Zhao, Yuval Kirstain, Zecheng He, Zijian He, Albert Pumarola, Ali Thabet, Artsiomi Sanakoyeu, Arun Mallya, Baishan Guo, Boris Araya, Breena Kerr, Carleigh Wood, Ce Liu, Cen Peng, Dimitry Vengertsev, Edgar Schonfeld, Elliot Blanchard, Felix Juefei-Xu, Fraylie Nord, Jeff Liang, John Hoffman, Jonas Kohler, Kaolin Fire, Karthik Sivakumar, Lawrence Chen, Licheng Yu, Luya Gao, Markos Georgopoulos, Rashel Moritz, Sara K. Sampson, Shikai Li, Simone Parmeggiani, Steve Fine, Tara Fowler, Vladan Petrovic, and Yuming Du. 2025. Movie Gen: A Cast of Media Foundation Models. arXiv:2410.13720 [cs.CV] https://arxiv.org/abs/2410.13720
420
+
421
+ Tianhao Qi, Shancheng Fang, Yanze Wu, Hongtao Xie, Jiawei Liu, Lang Chen, Qian He, and Yongdong Zhang. 2024. Deadiff: An efficient stylization diffusion model with disentangled representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 8693-8702.
422
+ Can Qin, Shu Zhang, Ning Yu, Yihao Feng, Xinyi Yang, Yingbo Zhou, Huan Wang, Juan Carlos Niebles, Caiming Xiong, Silvio Savarese, et al. 2023. Unicontrol: A unified diffusion model for controllable visual generation in the wild. arXiv preprint arXiv:2305.11147 (2023).
423
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In International conference on machine learning. 8748-8763.
424
+ Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. 2022. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125 (2022).
425
+ Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 2022. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 10684-10695.
426
+ Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. 2023. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 22500-22510.
427
+ Simo Ryu. 2023. Low-rank adaptation for fast text-to-image diffusion fine-tuning.
428
+ Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Raphael Gontijo Lopes, et al. 2022a. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487 (2022).
429
+ Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. 2022b. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems 35 (2022), 36479-36494.
430
+ Fei Shen, Xin Jiang, Xin He, Hu Ye, Cong Wang, Xiaoyu Du, Zechao Li, and Jinhui Tang. 2025. Imagdressing-v1: Customizable virtual dressing. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 39. 6795-6804.
431
+ Gowthami Somepalli, Anubhav Gupta, Kamal Gupta, Shramay Palta, Micah Goldblum, Jonas Geiping, Abhinav Shrivastava, and Tom Goldstein. 2024. Measuring Style Similarity in Diffusion Models. arXiv preprint arXiv:2404.01292 (2024).
432
+ Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. 2024. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing 568 (2024), 127063.
433
+ Zhenxiong Tan, Songhua Liu, Xingyi Yang, Qiaochu Xue, and Xinchao Wang. 2024. OminiControl: Minimal and Universal Control for Diffusion Transformer. arXiv preprint arXiv:2411.15098 (2024).
434
+ Zhenchen Wan, Dongting Hu, Weilun Cheng, Tianxi Chen, Zhaoqing Wang, Feng Liu, Tongliang Liu, Mingming Gong, et al. 2025. MF-VITON: High-Fidelity Mask-Free Virtual Try-On with Minimal Input. arXiv preprint arXiv:2503.08650 (2025).
435
+ Haofan Wang, Matteo Spinelli, Qixun Wang, Xu Bai, Zekui Qin, and Anthony Chen. 2024c. Instantstyle: Free lunch towards style-preserving in text-to-image generation. arXiv preprint arXiv:2404.02733 (2024).
436
+ Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, Anthony Chen, Huaxia Li, Xu Tang, and Yao Hu. 2024a. Instantid: Zero-shot identity-preserving generation in seconds. arXiv preprint arXiv:2401.07519 (2024).
437
+ Xierui Wang, Siming Fu, Qihan Huang, Wanggui He, and Hao Jiang. 2024b. Ms-diffusion: Multi-subject zero-shot image personalization with layout guidance. arXiv preprint arXiv:2406.07209 (2024).
438
+ Zongze Wu, Yotam Nitzan, Eli Shechtman, and Dani Lischinski. 2021. Stylealign: Analysis and applications of aligned stylegan models. arXiv preprint arXiv:2110.11323 (2021).
439
+ Guangxuan Xiao, Tianwei Yin, William T Freeman, Frédo Durand, and Song Han. 2024b. Fastcomposer: Tuning-free multi-subject image generation with localized attention. International Journal of Computer Vision (2024), 1-20.
440
+ Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Chaofan Li, Shuting Wang, Tiejun Huang, and Zheng Liu. 2024a. Omnigen: Unified image generation. arXiv preprint arXiv:2409.11340 (2024).
441
+ Peng Xing, Haofan Wang, Yanpeng Sun, Qixun Wang, Xu Bai, Hao Ai, Renyuan Huang, and Zechao Li. 2024. Csgo: Content-style composition in text-to-image generation. arXiv preprint arXiv:2408.16766 (2024).
442
+ Rento Yamaguchi and Keiji Yanai. 2024. Exploring Cross-Attention Maps in Multi-modal Diffusion Transformers for Training-Free Semantic Segmentation. In Proceedings of the Asian Conference on Computer Vision. 260-274.
443
+ Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. 2023. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 18381-18391.
444
+
445
+ Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. 2023. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721 (2023).
446
+ Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. 2023. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision. 11975-11986.
447
+ Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. 2023. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 3836-3847.
448
+ Shihao Zhao, Dongdong Chen, Yen-Chun Chen, Jianmin Bao, Shaozhe Hao, Lu Yuan, and Kwan-Yee K Wong. 2023. Uni-controlnet: All-in-one control to text-to-image diffusion models. Advances in Neural Information Processing Systems 36 (2023), 11127-11150.
449
+
450
+ ![](images/7b2a214f6fbc2c844a88ddcd4c48bf5d306ef500f8d7673ec7ba6a80caed4eb0.jpg)
451
+ Fig. 10. The capability of our proposed DreamO in identity-driven image customization.
452
+
453
+ ![](images/6d6f9a7f0b69294238f41794540c52bba98be7b4630308b5e75e65c7530d1ab7.jpg)
454
+ Fig. 11. The capability of our proposed DreamO in subject-driven image customization.
455
+
456
+ ![](images/3f9b4e74553651c0ddbe13cd6321ae260acf1f75f8d0f0f4a5f81965d6a3dc84.jpg)
457
+ Fig. 12. The capability of our proposed DreamO in try-on image customization.
458
+
459
+ ![](images/0a37a84c34bcf97d4ae65a0d82bbcfc39b811e9371dcc7a49b0adb367688db03.jpg)
460
+ "Generate a same style image. Notre Dame de Paris"
461
+
462
+ ![](images/0432a07ef93557345d5410e55f0a84064c863687eec35f8deda9a56c7fbb8dce.jpg)
463
+ "Generate a same style image. A shark"
464
+
465
+ ![](images/133116da6d91d0149199e87f7ec2c280eab6112a92179f3df95f558eb764b680.jpg)
466
+ "Generate a same style image. A girl"
467
+
468
+ ![](images/e35a8feb22d971c09856f196921382d77ff4c657bd6659d81e1e1eb657b8d6f2.jpg)
469
+ "Generate a same style image. A dog"
470
+
471
+ ![](images/34a7b121779591bdaa232e74d32181ee34cb59ac1f76781e9cfd988ee9d3fd36.jpg)
472
+ "Generate a same style image. Eiffel Tower"
473
+
474
+ ![](images/8dbf023ee0aa591dc1781c1e102ced18c6364b930ef5ff055a9ae5a12fb123d9.jpg)
475
+ "Generate a same style image. A girl!"
476
+ Fig. 13. The capability of our proposed DreamO in style-driven image customization.
data/2025/2504_16xxx/2504.16915/images/035079926d46f2f8da63126c0b7a2b7d29bc12783cfe5d59e518e6b0f5722fd8.jpg ADDED

Git LFS Details

  • SHA256: ce85e78a5a3efc20d5ed2de69bfee4ea11a00be92c2d38c2775dc6dbb9f22886
  • Pointer size: 129 Bytes
  • Size of remote file: 4.1 kB
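For context on the entries that follow: each image is tracked with Git LFS, so the repository itself stores only a small text pointer (the "Pointer size" above), while the JPEG lives on the LFS server (the "Size of remote file"). A minimal sketch of such a pointer and how its fields map to the first entry is shown below; the `size` value of 4177 bytes is illustrative, since the listing only reports "4.1 kB".

```python
# Hedged sketch of a Git LFS v1 pointer file, the small text file that is
# actually checked into the repository in place of the binary image.
# The oid is copied from the entry above; size 4177 is an assumed byte
# count, since the listing only reports "4.1 kB".
pointer_text = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:ce85e78a5a3efc20d5ed2de69bfee4ea11a00be92c2d38c2775dc6dbb9f22886\n"
    "size 4177\n"
)

# Parse the "key value" lines into a dict.
fields = dict(line.split(" ", 1) for line in pointer_text.splitlines())

print(fields["oid"])                # SHA256 of the remote JPEG ("SHA256" field above)
print(int(fields["size"]))          # size of the remote file in bytes
print(len(pointer_text.encode()))   # byte length of the pointer itself (~129 bytes)
```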
data/2025/2504_16xxx/2504.16915/images/0432a07ef93557345d5410e55f0a84064c863687eec35f8deda9a56c7fbb8dce.jpg ADDED

Git LFS Details

  • SHA256: c5be0887dc9c8fe0a0566bb5b69a96efd8b0469243ba6cb118bb97ad537ca840
  • Pointer size: 130 Bytes
  • Size of remote file: 16.9 kB
data/2025/2504_16xxx/2504.16915/images/07260139273f514c42045fe956c86ba1fd6e8fa298241d0ec6b0b61e9341e0d8.jpg ADDED

Git LFS Details

  • SHA256: dad099ea988c1df7186fe6b14ce35430b09aafe542333a1cca0de23901bc7f9c
  • Pointer size: 130 Bytes
  • Size of remote file: 12.1 kB
data/2025/2504_16xxx/2504.16915/images/0744fe79fd2b947138ea8f8826934b0d09f3393cbcac54c6325e4593e078cdb9.jpg ADDED

Git LFS Details

  • SHA256: 1c29a9264d72ac892fc2dbbda590193bc6576077261c4063046d94fe6a82ece3
  • Pointer size: 129 Bytes
  • Size of remote file: 2.37 kB
data/2025/2504_16xxx/2504.16915/images/09faad96bf101550a3b05cbc6476d051043ed07a04555bf067635a8647db808e.jpg ADDED

Git LFS Details

  • SHA256: 228f4fe337e6ce19d9282cb0278dbfa88341b9465a15b1a4482c087d08c5e3d6
  • Pointer size: 130 Bytes
  • Size of remote file: 20.5 kB
data/2025/2504_16xxx/2504.16915/images/0a37a84c34bcf97d4ae65a0d82bbcfc39b811e9371dcc7a49b0adb367688db03.jpg ADDED

Git LFS Details

  • SHA256: 5ff00157d2a971debe90ae522c9c5f35620805f0f34c4790c9c138bb576f927d
  • Pointer size: 130 Bytes
  • Size of remote file: 23.7 kB
data/2025/2504_16xxx/2504.16915/images/0f7ddb68b6175778bbefe5618e3f4f6ef9f17d91089c4a31b3703ec1b1f38160.jpg ADDED

Git LFS Details

  • SHA256: 5704c0c00351eb1f866368a0c3d3f349445372aec9d7529915cd39011e4d2791
  • Pointer size: 130 Bytes
  • Size of remote file: 11.6 kB
data/2025/2504_16xxx/2504.16915/images/133116da6d91d0149199e87f7ec2c280eab6112a92179f3df95f558eb764b680.jpg ADDED

Git LFS Details

  • SHA256: e079d2805353838a0a5f679153f45189bdcfa7d0cb108bb53d0f1806236d009f
  • Pointer size: 130 Bytes
  • Size of remote file: 21.2 kB
data/2025/2504_16xxx/2504.16915/images/19a5b372324eb3eb330e76b773d7bde6ca02dc4952249c8a77ceb8c6829803a2.jpg ADDED

Git LFS Details

  • SHA256: 9260ab5586e490fef9e98b885e436a2183f5bf564dd6a0b925d89839e4a2a4a4
  • Pointer size: 130 Bytes
  • Size of remote file: 64.4 kB
data/2025/2504_16xxx/2504.16915/images/1eb7bb537257f5472de95bbe7ca0af4248c5292d4a57eaa775dfb7317bfe0a0d.jpg ADDED

Git LFS Details

  • SHA256: 7edaccb3d1d5f8644d6fc346090cf48f6f731a446be81bfe9cc8628132d73f73
  • Pointer size: 130 Bytes
  • Size of remote file: 11.2 kB
data/2025/2504_16xxx/2504.16915/images/279495053316c8b020c2dd35bebec211f19221ae503c48c7d49c50c5ba115002.jpg ADDED

Git LFS Details

  • SHA256: 8261bb9f5b1fe0df463faf1267de08e7671356c6bfbcbcaba25ba4b1dc3548bd
  • Pointer size: 129 Bytes
  • Size of remote file: 3.22 kB
data/2025/2504_16xxx/2504.16915/images/34a7b121779591bdaa232e74d32181ee34cb59ac1f76781e9cfd988ee9d3fd36.jpg ADDED

Git LFS Details

  • SHA256: a578fa788ba06dedfdfe769fa2088431706e975b31c6a22264ca82fadc27e09e
  • Pointer size: 130 Bytes
  • Size of remote file: 30.3 kB
data/2025/2504_16xxx/2504.16915/images/39e7331373b2d07820d10133371317fc8d79b6d0906717822d02bab35db0cab9.jpg ADDED

Git LFS Details

  • SHA256: 08eead3505c78c49ffa2897c338f716a575ef595bdbb28dea61260217506cc16
  • Pointer size: 130 Bytes
  • Size of remote file: 18.5 kB
data/2025/2504_16xxx/2504.16915/images/3b186c98eb21f754d119318ae4505a43cf224efab1432d1b2f309745e0bd26d0.jpg ADDED

Git LFS Details

  • SHA256: fae27a8df0f913968192bc77a8ed105065fd32c5ea708db87f3abc67e0725b03
  • Pointer size: 129 Bytes
  • Size of remote file: 2.67 kB
data/2025/2504_16xxx/2504.16915/images/3d7c45991e96321253eeedc99a23a4aee4c99f044b2529919423e4a4e67dfa1d.jpg ADDED

Git LFS Details

  • SHA256: 51661c193dd279d881debe2abbdaccd06f80a1bd19d8012fc36c1ad440dc45a3
  • Pointer size: 129 Bytes
  • Size of remote file: 2.69 kB
data/2025/2504_16xxx/2504.16915/images/3f9b4e74553651c0ddbe13cd6321ae260acf1f75f8d0f0f4a5f81965d6a3dc84.jpg ADDED

Git LFS Details

  • SHA256: a79be44e827dab74890dbed2430d967ad45d9557ab8be3a218d3f5a20d10dcc6
  • Pointer size: 131 Bytes
  • Size of remote file: 151 kB
data/2025/2504_16xxx/2504.16915/images/3f9e1f0507ce8abd48a60c47b843dca5cf96a834f02b50c5e45888c4019f1281.jpg ADDED

Git LFS Details

  • SHA256: 0b0493619aaad6873cbe00cde9b0713e0950b4ef5ca156151cce893a9a5bd75e
  • Pointer size: 130 Bytes
  • Size of remote file: 10.9 kB
data/2025/2504_16xxx/2504.16915/images/4712a1a51ee9fd15a3fa96ea8683d6dfb530235f43d3ce9a836e3e12fb10be98.jpg ADDED

Git LFS Details

  • SHA256: 7a784534c25203dcad8675fa5dc1c5a671bee5c070ca6d3df126a84287e6e1a5
  • Pointer size: 129 Bytes
  • Size of remote file: 9.85 kB
data/2025/2504_16xxx/2504.16915/images/48d6ce06c9e3f7d066c086ec66a63afd5e45fa6e67cae473db8a93dc32b5bee6.jpg ADDED

Git LFS Details

  • SHA256: fb1ee11ce3efd3e4f57f81caa6dd9d6a249947d36aecefa7e9b05aa1c59a5c1d
  • Pointer size: 129 Bytes
  • Size of remote file: 8.85 kB
data/2025/2504_16xxx/2504.16915/images/512df60b48f29d339264abecf08830607357111e99bc45ac5d7dd6f4c81337d7.jpg ADDED

Git LFS Details

  • SHA256: 7ec58378685be674bcbb43558efb8428bdb2720977f7f8b0205ec2a0f5097254
  • Pointer size: 131 Bytes
  • Size of remote file: 140 kB
data/2025/2504_16xxx/2504.16915/images/59708c463d8b169038e1962f79cb519893304ca0a2b11d0b59026a5bfe791ce1.jpg ADDED

Git LFS Details

  • SHA256: 94a2728066d6e44027aa00ec99c1efaa19db79ef0fcedfd7dd740a9ea6b03b6c
  • Pointer size: 130 Bytes
  • Size of remote file: 12.7 kB
data/2025/2504_16xxx/2504.16915/images/6759b15ae1941017d09b2e234f99ae239f6ad90c61b12df9930421dd0689ad11.jpg ADDED

Git LFS Details

  • SHA256: 32103913d75b45b7edbb83cef7dc02e7a7587c7a7e38d9434489335d7a36367f
  • Pointer size: 130 Bytes
  • Size of remote file: 14.1 kB
data/2025/2504_16xxx/2504.16915/images/6d6f9a7f0b69294238f41794540c52bba98be7b4630308b5e75e65c7530d1ab7.jpg ADDED

Git LFS Details

  • SHA256: f99ebf9135d7a915d2a823b10d0e54d78cc24b3ec077cc1a4865d298e7332a26
  • Pointer size: 131 Bytes
  • Size of remote file: 144 kB
data/2025/2504_16xxx/2504.16915/images/6eeeee20f5cd2e9bce0722f58c88de082019986540f7c8f47f008cd9f840d613.jpg ADDED

Git LFS Details

  • SHA256: 8daf6a0dca359b25535907f3c8088e75521a1dfe3b154ad29bf590103d29bc18
  • Pointer size: 129 Bytes
  • Size of remote file: 4.57 kB
data/2025/2504_16xxx/2504.16915/images/78d1e4bd0466cad344cac7d4141bd55bd2e9cb79871f51594e29aada63a1240a.jpg ADDED

Git LFS Details

  • SHA256: 08d1152c45e47d2fb595df7a33ac193d4a214cd33914c42a7bbebe2ac8504db6
  • Pointer size: 129 Bytes
  • Size of remote file: 6.12 kB
data/2025/2504_16xxx/2504.16915/images/7b2a214f6fbc2c844a88ddcd4c48bf5d306ef500f8d7673ec7ba6a80caed4eb0.jpg ADDED

Git LFS Details

  • SHA256: e6bdcb186df730bac4e4104ff43d689b02a3eff24d5ac5d068670f2bf7948102
  • Pointer size: 131 Bytes
  • Size of remote file: 144 kB
data/2025/2504_16xxx/2504.16915/images/7bb98310f45445acde8f349cf99527a0253e1c53a050d68a0812a68546789b7d.jpg ADDED

Git LFS Details

  • SHA256: 0ce4873cbeea8bbfcb7ccde323eff188ae4b21da13b23cd282ce6c4ff3e069d0
  • Pointer size: 129 Bytes
  • Size of remote file: 4.12 kB
data/2025/2504_16xxx/2504.16915/images/83aa1c7a0518f1b56ac9817c9af2ed29f5d037a45aaa3f7c69c00cf2a36d6728.jpg ADDED

Git LFS Details

  • SHA256: 15cfb12bbf81078f26f0eff07512b0330c1be6186b2697f55d122a9e2d545ea9
  • Pointer size: 130 Bytes
  • Size of remote file: 12 kB
data/2025/2504_16xxx/2504.16915/images/87feb1edfe09a23211bbce95806941ffc02f28b39c9fb9d8d2a1c0ed450cb5d9.jpg ADDED

Git LFS Details

  • SHA256: 7b474e188846982f20290d3407b57ed8388c5ecac494e31b3c4a68fd91033886
  • Pointer size: 130 Bytes
  • Size of remote file: 11 kB
data/2025/2504_16xxx/2504.16915/images/8869e13e02c742ca66cab98f4bbd2b65a66bc17b61193d446b30ba1e56f3649f.jpg ADDED

Git LFS Details

  • SHA256: be83ad3dd4e370217ec94e85cac3c36e3b9848b14b7bbb80e8fd447fd0eedb5e
  • Pointer size: 130 Bytes
  • Size of remote file: 13 kB
data/2025/2504_16xxx/2504.16915/images/88ea109eb1ccbf381ca0aa3e52d19e4fc8614c363f1226a32695f7229c632802.jpg ADDED

Git LFS Details

  • SHA256: 86575a2ea23fbbcdd591c0718206efb20dfe241c238b5687b5ff85ffc756c615
  • Pointer size: 130 Bytes
  • Size of remote file: 12.6 kB
data/2025/2504_16xxx/2504.16915/images/8a50704bbd8d4bcd63432205dfb35bb65173b618c455b328507ccfd92595fbd6.jpg ADDED

Git LFS Details

  • SHA256: 169bb999e1ead8d9c5b0b4236e5e5e943de1ade4e74acd94e5f754680e9a26f4
  • Pointer size: 130 Bytes
  • Size of remote file: 15.6 kB
data/2025/2504_16xxx/2504.16915/images/8dbf023ee0aa591dc1781c1e102ced18c6364b930ef5ff055a9ae5a12fb123d9.jpg ADDED

Git LFS Details

  • SHA256: e49a6528fe035b30afd9ee56094c49535a98e22f3d9eabb261a899db6abe7ca9
  • Pointer size: 130 Bytes
  • Size of remote file: 31.1 kB
data/2025/2504_16xxx/2504.16915/images/8e592aa193623242c25a71bb25b65a80211ad006043b22c6da9512ab81073638.jpg ADDED

Git LFS Details

  • SHA256: fe1ecb9abcc454721380708595f0863cd0b54893b44d352c94a0674ee44587ff
  • Pointer size: 130 Bytes
  • Size of remote file: 19 kB
data/2025/2504_16xxx/2504.16915/images/8f9bef8c1c0048754e93ca438422aa05e02fc97d2186ec338f23f0c557d87e83.jpg ADDED

Git LFS Details

  • SHA256: 736cce0b72e77277938f00776f5928038782143811f014d4d0136f38f26af899
  • Pointer size: 130 Bytes
  • Size of remote file: 13.3 kB
data/2025/2504_16xxx/2504.16915/images/917dbb84a91b58d1b0e0eb31d1a6f488a793775e5842ef7d43caebbb9f2279fc.jpg ADDED

Git LFS Details

  • SHA256: af47b22829743cb4b39b37f1c3662438ccc77ff046f9010e99ef1e7ecf877b70
  • Pointer size: 129 Bytes
  • Size of remote file: 7.35 kB
data/2025/2504_16xxx/2504.16915/images/9451fc131c15f6204b3bd192f65fea00354fa43b91f156e8fe8b7823cea3216c.jpg ADDED

Git LFS Details

  • SHA256: 3b6c7e7ec9b643a67005ac9fba9749e84428eac69f77d06df19e10ccb019d0e0
  • Pointer size: 129 Bytes
  • Size of remote file: 4.49 kB
data/2025/2504_16xxx/2504.16915/images/9b995ccb1ac20db173f09243ec34675af732723ed93d62cd0372b311174c4936.jpg ADDED

Git LFS Details

  • SHA256: 520b6ee0e45d38bbb63610c6557136b38596d736dcc3bbdfa11f189034415831
  • Pointer size: 129 Bytes
  • Size of remote file: 7.71 kB
data/2025/2504_16xxx/2504.16915/images/a957a9c5df9c4a4746bf7469038ac30d1efbbaaeade4b2095e4bf9c8f172f86a.jpg ADDED

Git LFS Details

  • SHA256: 584a7a4f3a96c0a8e2fb983657c003bd24232439e9b4c08e640edc5d9b199a5f
  • Pointer size: 130 Bytes
  • Size of remote file: 68.5 kB
data/2025/2504_16xxx/2504.16915/images/aeb2b636c720541aaa9b57d12f25842ba7046487006e746bac70140cf3860a0e.jpg ADDED

Git LFS Details

  • SHA256: f103df3e8825e6f9946c5de9921dd92b5d65344a231d3d533f93a3ab8d1b3c3c
  • Pointer size: 130 Bytes
  • Size of remote file: 71.5 kB
data/2025/2504_16xxx/2504.16915/images/b3f856dddb811ccd38bba81c312339a9127f5f73f20ce41272e38f90ef08355e.jpg ADDED

Git LFS Details

  • SHA256: 70bb4e5e8381cece42dd7cacc8ebedaa1b1086599bb06e0cea90a1cc0a7ae251
  • Pointer size: 130 Bytes
  • Size of remote file: 77.5 kB
data/2025/2504_16xxx/2504.16915/images/b5cf1d26ce8ea9c42b6e64aac574a867e2cbe4885ce5de12f731901c7fa4f223.jpg ADDED

Git LFS Details

  • SHA256: c078f4e0547012ae576a58fc3db6c447d7d10998a8de9177e3baa1fc2d7f7122
  • Pointer size: 129 Bytes
  • Size of remote file: 5 kB
data/2025/2504_16xxx/2504.16915/images/b7ed295f87b9d59e7150dcf77881c1c4305daaeed8f846da0a18fc0b8447e890.jpg ADDED

Git LFS Details

  • SHA256: e0c5fcaf89ce6a98d500fdcd674052db10ff2c6f25e14bd676142ad8fff1348c
  • Pointer size: 130 Bytes
  • Size of remote file: 38.4 kB
data/2025/2504_16xxx/2504.16915/images/ba88c876e371b5384caf1ad00462d7f9a10f96dc6262f35dd6909224ae83ed7b.jpg ADDED

Git LFS Details

  • SHA256: ce5db4d7df695ae2b52aa93db36f39a4118a316d4d6ff364ddb36c9b03cd2c19
  • Pointer size: 130 Bytes
  • Size of remote file: 13.8 kB
data/2025/2504_16xxx/2504.16915/images/c03680d5e4b522a6c5c6dfcb68dfee64fcb6ad2fabcd589ae50215cf3f1b3158.jpg ADDED

Git LFS Details

  • SHA256: 66a80458a9794409bc9ab9583a04352129daaa1bcc04cc6bab7d99b1976a1a0f
  • Pointer size: 130 Bytes
  • Size of remote file: 40.5 kB