Azily committed on
Commit a9c5a06 · verified · 1 Parent(s): 0ef2e30

Upload Macro-OmniGen2 model

Files changed (49)
  1. .gitattributes +9 -0
  2. README.md +228 -0
  3. assets/brand.png +3 -0
  4. assets/efficiency.png +3 -0
  5. assets/examples_edit.png +3 -0
  6. assets/examples_subject.png +3 -0
  7. assets/teaser.jpg +3 -0
  8. assets/teaser.png +3 -0
  9. mllm/config.json +65 -0
  10. mllm/generation_config.json +15 -0
  11. mllm/model-00001-of-00004.safetensors +3 -0
  12. mllm/model-00002-of-00004.safetensors +3 -0
  13. mllm/model-00003-of-00004.safetensors +3 -0
  14. mllm/model-00004-of-00004.safetensors +3 -0
  15. mllm/model.safetensors.index.json +831 -0
  16. mllm_processor/added_tokens.json +28 -0
  17. mllm_processor/chat_template.json +3 -0
  18. mllm_processor/merges.txt +0 -0
  19. mllm_processor/preprocessor_config.json +29 -0
  20. mllm_processor/special_tokens_map.json +31 -0
  21. mllm_processor/tokenizer.json +3 -0
  22. mllm_processor/tokenizer_config.json +241 -0
  23. mllm_processor/vocab.json +0 -0
  24. model_index.json +24 -0
  25. processor/added_tokens.json +28 -0
  26. processor/chat_template.json +3 -0
  27. processor/merges.txt +0 -0
  28. processor/preprocessor_config.json +29 -0
  29. processor/special_tokens_map.json +31 -0
  30. processor/tokenizer.json +3 -0
  31. processor/tokenizer_config.json +241 -0
  32. processor/vocab.json +0 -0
  33. scheduler/__pycache__/scheduling_flow_match_euler_discrete.cpython-310.pyc +0 -0
  34. scheduler/__pycache__/scheduling_flow_match_euler_discrete.cpython-311.pyc +0 -0
  35. scheduler/__pycache__/scheduling_flow_match_euler_discrete.cpython-312.pyc +0 -0
  36. scheduler/scheduler_config.json +6 -0
  37. scheduler/scheduling_flow_match_euler_discrete.py +229 -0
  38. time_caption_context_reinit/config.json +27 -0
  39. time_caption_context_reinit/diffusion_pytorch_model-00001-of-00002.safetensors +3 -0
  40. time_caption_context_reinit/diffusion_pytorch_model-00002-of-00002.safetensors +3 -0
  41. time_caption_context_reinit/diffusion_pytorch_model.safetensors.index.json +589 -0
  42. time_caption_reinit/config.json +27 -0
  43. time_caption_reinit/diffusion_pytorch_model-00001-of-00002.safetensors +3 -0
  44. time_caption_reinit/diffusion_pytorch_model-00002-of-00002.safetensors +3 -0
  45. time_caption_reinit/diffusion_pytorch_model.safetensors.index.json +589 -0
  46. transformer/config.json +28 -0
  47. transformer/diffusion_pytorch_model.safetensors +3 -0
  48. vae/config.json +38 -0
  49. vae/diffusion_pytorch_model.safetensors +3 -0
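The layout above follows the diffusers multi-component convention: `model_index.json` plus per-component subfolders (`transformer/`, `vae/`, `scheduler/`, `processor/`) alongside the `mllm/` backbone and its `mllm_processor/`. Below is a hedged sketch of fetching the whole snapshot with `huggingface_hub`; the repo id is a placeholder, since this commit view does not name the repository:

```python
# Hedged sketch: download this upload locally with huggingface_hub.
# "your-namespace/Macro-OmniGen2" is a placeholder repo id, not stated in this commit view.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="your-namespace/Macro-OmniGen2",               # placeholder
    allow_patterns=["*.json", "*.txt", "*.safetensors"],   # optionally skip the demo images in assets/
)
print(local_dir)  # contains mllm/, mllm_processor/, processor/, scheduler/, transformer/, vae/, ...
```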
.gitattributes CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer/tokenizer.json filter=lfs diff=lfs merge=lfs -text
37
+ mllm_processor/tokenizer.json filter=lfs diff=lfs merge=lfs -text
38
+ processor/tokenizer.json filter=lfs diff=lfs merge=lfs -text
39
+ assets/brand.png filter=lfs diff=lfs merge=lfs -text
40
+ assets/examples_edit.png filter=lfs diff=lfs merge=lfs -text
41
+ assets/examples_subject.png filter=lfs diff=lfs merge=lfs -text
42
+ assets/teaser.png filter=lfs diff=lfs merge=lfs -text
43
+ assets/efficiency.png filter=lfs diff=lfs merge=lfs -text
44
+ assets/teaser.jpg filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,228 @@
1
+ ---
2
+ license: apache-2.0
3
+ pipeline_tag: any-to-any
4
+ library_name: diffusers
5
+ ---
6
+ <p align="center">
7
+ <a href="https://github.com/Ve<p align="center">
8
+ <img src="assets/brand.png" width="65%">
9
+ </p>
10
+
11
+ <p align="center">
12
+ <a href="https://vectorspacelab.github.io/OmniGen2"><img src="https://img.shields.io/badge/Project%20Page-OmniGen2-yellow" alt="project page"></a>
13
+ <a href="https://arxiv.org/abs/2506.18871"><img src="https://img.shields.io/badge/arXiv%20paper-2506.18871-b31b1b.svg" alt="arxiv"></a>
14
+ <a href="https://github.com/VectorSpaceLab/OmniGen2?tab=readme-ov-file#-gradio-demo"><img src="https://img.shields.io/badge/Online%20Demo-🤗-blue" alt="demo"></a>
15
+ <a href="https://huggingface.co/spaces/OmniGen2/OmniGen2"><img src="https://img.shields.io/badge/HF%20Spaces-🤗-lightblue" alt="demo"></a>
16
+ <a href="https://huggingface.co/OmniGen2/OmniGen2"><img src="https://img.shields.io/badge/Model-🤗-yellow" alt="model"></a>
17
+ <a href="https://huggingface.co/datasets/OmniGen2/OmniContext"><img src="https://img.shields.io/badge/Benchmark-🤗-yellow" alt="model"></a>
18
+ <a href="https://huggingface.co/datasets/OmniGen2/X2I2"><img src="https://img.shields.io/badge/Dataset-🤗-yellow" alt="model"></a>
19
+ </p>
20
+
21
+ <h4 align="center">
22
+ <p>
23
+ <a href=#-news>News</a> |
24
+ <a href=#-quick-start>Quick Start</a> |
25
+ <a href=#-usage-tips>Usage Tips</a> |
26
+ <a href=#-gradio-demo>Online Demos</a> |
27
+ <a href="#heart-citing-us">Citation</a> |
28
+ <a href="#license">License</a>
29
+ <p>
30
+ </h4>
31
+
32
+ ## 🔥 News
33
+ - **2025-07-05**: Training datasets [X2I2](https://huggingface.co/datasets/OmniGen2/X2I2) are available.
34
+ - **2025-07-03**: OmniGen2 now supports [TeaCache](https://github.com/ali-vilab/TeaCache) and [TaylorSeer](https://github.com/Shenyi-Z/TaylorSeer) for faster inference, see [Usage Tips](#-usage-tips) for details. Thanks @legitnull for great [TeaCache-PR](https://github.com/VectorSpaceLab/OmniGen2/pull/52) and [TaylorSeer-PR](https://github.com/VectorSpaceLab/OmniGen2/pull/76).
35
+ - **2025-07-01**: OmniGen2 is now supported by the official [ComfyUI](https://comfyanonymous.github.io/ComfyUI_examples/omnigen) examples. Thanks!
36
+ - **2025-06-30**: Training code is available; see [fine-tuning](docs/FINETUNE.md) for details.
37
+ - **2025-06-28**: We release the [OmniContext](https://huggingface.co/datasets/OmniGen2/OmniContext) benchmark. The evaluation code is in [omnicontext](https://github.com/VectorSpaceLab/OmniGen2/tree/main/omnicontext).
38
+ - **2025-06-24**: [Technical Report](https://arxiv.org/abs/2506.18871) is available.
39
+ - **2025-06-23**: We’ve updated our code and HF model—OmniGen2 now runs *without* `flash-attn`. Users can still install it for optimal performance.
40
+ - **2025-06-20**: Updated [resource requirements](#-resources-requirement), adding CPU offload support for devices with limited VRAM.
41
+ - **2025-06-16**: [Gradio](https://github.com/VectorSpaceLab/OmniGen2?tab=readme-ov-file#-gradio-demo) and [Jupyter](https://github.com/VectorSpaceLab/OmniGen2/blob/main/example.ipynb) demos are available. Online Gradio demos: [Demo1](https://9c4426d27c3b9ecbed.gradio.live); [Chat-Demo1](https://0351497834a4d7226c.gradio.live); see more demo links in the [gradio section](https://github.com/VectorSpaceLab/OmniGen2?tab=readme-ov-file#-gradio-demo).
42
+ - **2025-06-16**: We release **OmniGen2**, a multimodal generation model; its weights are available on [Hugging Face](https://huggingface.co/OmniGen2/OmniGen2) and [ModelScope](https://www.modelscope.cn/models/OmniGen2/OmniGen2).
43
+
44
+
45
+ ## Introduction
46
+ **OmniGen2** is a powerful and efficient unified multimodal model. Unlike OmniGen v1, OmniGen2 features two distinct decoding pathways for text and image modalities, utilizing unshared parameters and a decoupled image tokenizer. OmniGen2 has competitive performance across four primary capabilities:
47
+
48
+ - **Visual Understanding**: Inherits the robust ability to interpret and analyze image content from its Qwen2.5-VL foundation.
49
+ - **Text-to-Image Generation**: Creates high-fidelity and aesthetically pleasing images from textual prompts.
50
+ - **Instruction-guided Image Editing**: Executes complex, instruction-based image modifications with high precision, achieving state-of-the-art performance among open-source models.
51
+ - **In-context Generation**: A versatile capability to process and flexibly combine diverse inputs—including humans, reference objects, and scenes—to produce novel and coherent visual outputs.
52
+
53
+ As an open-source project, OmniGen2 provides a powerful yet resource-efficient foundation for researchers and developers exploring the frontiers of controllable and personalized generative AI.
54
+
55
+ **We will release the training code, dataset, and data construction pipeline soon. Stay tuned!**
56
+
57
+ <p align="center">
58
+ <img src="assets/teaser.jpg" width="95%">
59
+ <br>
60
+ <em>Demonstration of OmniGen2's overall capabilities.</em>
61
+ </p>
62
+
63
+ <p align="center">
64
+ <img src="assets/examples_edit.png" width="95%">
65
+ <br>
66
+ <em>Demonstration of OmniGen2's image editing capabilities.</em>
67
+ </p>
68
+
69
+ <p align="center">
70
+ <img src="assets/examples_subject.png" width="95%">
71
+ <br>
72
+ <em>Demonstration of OmniGen2's in-context generation capabilities.</em>
73
+ </p>
74
+
75
+
76
+
77
+ ## 📌 TODO
78
+ - [x] Technical report.
79
+ - [x] Support CPU offload and improve inference efficiency.
80
+ - [x] In-context generation benchmark: **OmniContext**.
81
+ - [ ] Integration of diffusers.
82
+ - [x] Training datasets.
83
+ - [ ] Training data construction pipeline.
84
+ - [ ] ComfyUI Demo (**community support will be greatly appreciated!**).
85
+
86
+
87
+ ## 🚀 Quick Start
88
+
89
+ ### 🛠️ Environment Setup
90
+
91
+ #### ✅ Recommended Setup
92
+
93
+ ```bash
94
+ # 1. Clone the repo
95
+ git clone git@github.com:VectorSpaceLab/OmniGen2.git
96
+ cd OmniGen2
97
+
98
+ # 2. (Optional) Create a clean Python environment
99
+ conda create -n omnigen2 python=3.11
100
+ conda activate omnigen2
101
+
102
+ # 3. Install dependencies
103
+ # 3.1 Install PyTorch (choose correct CUDA version)
104
+ pip install torch==2.6.0 torchvision --extra-index-url https://download.pytorch.org/whl/cu124
105
+
106
+ # 3.2 Install other required packages
107
+ pip install -r requirements.txt
108
+
109
+ # Note: Version 2.7.4.post1 is specified for compatibility with CUDA 12.4.
110
+ # Feel free to use a newer version if you are on CUDA 12.6 or once this compatibility issue is fixed.
111
+ # OmniGen2 runs even without flash-attn, though we recommend installing it for best performance.
112
+ pip install flash-attn==2.7.4.post1 --no-build-isolation
113
+ ```
114
+
115
+ #### 🌏 For users in Mainland China
116
+
117
+ ```bash
118
+ # Install PyTorch from a domestic mirror
119
+ pip install torch==2.6.0 torchvision --index-url https://mirror.sjtu.edu.cn/pytorch-wheels/cu124
120
+
121
+ # Install other dependencies from Tsinghua mirror
122
+ pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
123
+
124
+ # Note: Version 2.7.4.post1 is specified for compatibility with CUDA 12.4.
125
+ # Feel free to use a newer version if you are on CUDA 12.6 or once this compatibility issue is fixed.
126
+ # OmniGen2 runs even without flash-attn, though we recommend installing it for best performance.
127
+ pip install flash-attn==2.7.4.post1 --no-build-isolation -i https://pypi.tuna.tsinghua.edu.cn/simple
128
+ ```
129
+
130
+ ---
131
+
132
+ ### 🧪 Run Examples
133
+
134
+ ```bash
135
+ # Visual Understanding
136
+ bash example_understanding.sh
137
+
138
+ # Text-to-image generation
139
+ bash example_t2i.sh
140
+
141
+ # Instruction-guided image editing
142
+ bash example_edit.sh
143
+
144
+ # In-context generation
145
+ bash example_in_context_generation.sh
146
+ ```
147
+
148
+ ---
149
+
150
+ ### 🌐 Gradio Demo
151
+
152
+ * **Online Demo**: [HF Spaces](https://huggingface.co/spaces/OmniGen2/OmniGen2). Beyond Hugging Face Spaces, we are *temporarily* allocating additional GPU resources to ensure smooth access to the online demos. If you notice a long queue for a particular link, please try other links:
153
+
154
+ [Demo1](https://8f10329141d53b6884.gradio.live), [Demo2](https://110863cb06c6c44bd2.gradio.live), [Demo3](https://19b0952eb3cf0d2243.gradio.live), [Demo4](https://981758b17b4197aea7.gradio.live)
155
+
156
+ [Chat-Demo1](https://9315447fc78ef638e3.gradio.live), [Chat-Demo2](https://abe054be89543e4cef.gradio.live), [Chat-Demo3](https://4aa913765db00bbe51.gradio.live), [Chat-Demo4](https://f28a8718565627d2cb.gradio.live)
157
+
158
+ <!-- [Available on Hugging Face Spaces 🚀](https://huggingface.co/spaces/Shitao/OmniGen2) -->
159
+
160
+ * **Run Locally**:
161
+ ```bash
162
+ # For image generation only
163
+ pip install gradio
164
+ python app.py
165
+ # Optional: Share the demo via a public link (you need to be able to access Hugging Face)
166
+ python app.py --share
167
+
168
+ # For generating images or text
169
+ pip install gradio
170
+ python app_chat.py
171
+ ```
172
+
173
+ ## 💡 Usage Tips
174
+ To achieve optimal results with OmniGen2, you can adjust the following key hyperparameters based on your specific use case; a minimal invocation sketch follows the list.
175
+ - `text_guidance_scale`: Controls how strictly the output adheres to the text prompt (Classifier-Free Guidance).
176
+ - `image_guidance_scale`: This controls how much the final image should resemble the input reference image.
177
+ - **The Trade-off**: A higher value makes the output more faithful to the reference image's structure and style, but it might ignore parts of your text prompt. A lower value (~1.5) gives the text prompt more influence.
178
+ - **Tip**: For image editing tasks, we recommend setting it between 1.2 and 2.0; for in-context generation tasks, a higher `image_guidance_scale` preserves more detail from the input images, and we recommend setting it between 2.5 and 3.0.
179
+ - `max_pixels`: Automatically resizes images when their total pixel count (width × height) exceeds this limit, while maintaining their aspect ratio. This helps manage performance and memory usage.
180
+ - **Tip**: Default value is 1024*1024. You can reduce this value if you encounter memory issues.
181
+ - `max_input_image_side_length`: Maximum side length for input images.
182
+ - `negative_prompt`: Tells the model what you don't want to see in the image.
183
+ - **Example**: blurry, low quality, text, watermark
184
+ - **Tip**: For the best results, try experimenting with different negative prompts. If you're not sure, just use the default negative prompt.
185
+ - `enable_model_cpu_offload`: **Reduces VRAM usage by nearly 50% with a negligible impact on speed**.
186
+ - This is achieved by offloading the model weights to CPU RAM when they are not in use.
187
+ - See: [Model Offloading](https://huggingface.co/docs/diffusers/optimization/memory#model-offloading)
188
+ - `enable_sequential_cpu_offload`: Minimizes VRAM usage to less than 3GB, but at the cost of significantly slower performance.
189
+ - This works by offloading the model in submodules and loading them onto the GPU sequentially as needed.
190
+ - See: [CPU Offloading](https://huggingface.co/docs/diffusers/optimization/memory#cpu-offloading)
191
+ - `cfg_range_start`, `cfg_range_end`: Define the timestep range where CFG is applied. Per this [paper](https://arxiv.org/abs/2404.07724), reducing `cfg_range_end` can significantly decrease inference time with a negligible impact on quality.
192
+
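+ The snippet below is a minimal sketch of where these hyperparameters plug in, assuming the `OmniGen2Pipeline` class shipped in this repository and diffusers-style keyword arguments; check `app.py` and the `example_*.sh` scripts for the exact entry point.
+
+ ```python
+ # Minimal sketch, not a verbatim excerpt from the repo: the import path, class name, and
+ # keyword arguments are assumptions based on this README; see app.py for the real API.
+ import torch
+ from PIL import Image
+ from omnigen2.pipelines.omnigen2.pipeline_omnigen2 import OmniGen2Pipeline  # assumed import path
+
+ pipe = OmniGen2Pipeline.from_pretrained("OmniGen2/OmniGen2", torch_dtype=torch.bfloat16).to("cuda")
+
+ ref_images = [Image.open("image1.png"), Image.open("image2.png")]  # inputs above 512x512 work best
+ image = pipe(
+     prompt="Add the bird from image 1 onto the desk in image 2.",
+     input_images=ref_images,          # argument name is an assumption
+     negative_prompt="blurry, low quality, text, watermark",
+     text_guidance_scale=5.0,          # adherence to the text prompt (CFG)
+     image_guidance_scale=2.5,         # 1.2-2.0 for editing, 2.5-3.0 for in-context generation
+     cfg_range_start=0.0,
+     cfg_range_end=0.6,                # lowering this speeds up inference with little quality loss
+     max_pixels=1024 * 1024,           # larger inputs are resized, keeping aspect ratio
+ ).images[0]
+ image.save("output.png")
+ ```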
193
+ **Some suggestions for improving generation quality:**
194
+ 1. Use High-Quality Images
195
+ - Provide clear images, preferably with a resolution **greater than 512×512 pixels**.
196
+ - Small or blurry inputs will result in low-quality outputs.
197
+ 2. Be Specific with Instructions
198
+ - Clearly describe both **what to change** and **how you want it changed**.
199
+ - For in-context generation tasks, explicitly state which elements should come from which image. For example, instead of "Add bird to desk", say "Add the bird from image 1 onto the desk in image 2."
200
+ 3. Prioritize English
201
+ The model currently performs best with **English** prompts.
202
+
203
+ ## 💻 Resources Requirement
204
+ OmniGen2 natively requires an **NVIDIA RTX 3090** or an equivalent GPU with approximately **17GB of VRAM**. For devices with less VRAM, you can enable **CPU Offload** to run the model.
205
+
206
+ **Performance Tip**: To improve inference speed, consider decreasing the `cfg_range_end` parameter. Within a reasonable range, this has a negligible impact on output quality.
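+
+ A hedged sketch of the two offload switches, applied to a pipeline object `pipe` created as in the Usage Tips sketch above (whether this repository exposes them as methods or as launch flags is an assumption; see `app.py`):
+
+ ```python
+ # Hedged sketch: diffusers-style offload controls; names follow the options described above.
+ pipe.enable_model_cpu_offload()         # roughly halves the native ~17GB VRAM use, negligible slowdown
+ # pipe.enable_sequential_cpu_offload()  # under 3GB of VRAM, but significantly slower
+ ```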
207
+
208
+ The following table details the inference performance of OmniGen2 on an **A800 GPU**:
209
+ <p align="center">
210
+ <img src="assets/efficiency.png" width="95%">
211
+ <br>
212
+ <em>Inference Efficiency of OmniGen2.</em>
213
+ </p>
214
+
215
+ ## ❤️ Citing Us
216
+ If you find this repository or our work useful, please consider giving a star ⭐ and citation 🦖, which would be greatly appreciated:
217
+
218
+ ```bibtex
219
+ @article{wu2025omnigen2,
220
+ title={OmniGen2: Exploration to Advanced Multimodal Generation},
221
+ author={Chenyuan Wu and Pengfei Zheng and Ruiran Yan and Shitao Xiao and Xin Luo and Yueze Wang and Wanli Li and Xiyan Jiang and Yexin Liu and Junjie Zhou and Ze Liu and Ziyi Xia and Chaofan Li and Haoge Deng and Jiahao Wang and Kun Luo and Bo Zhang and Defu Lian and Xinlong Wang and Zhongyuan Wang and Tiejun Huang and Zheng Liu},
222
+ journal={arXiv preprint arXiv:2506.18871},
223
+ year={2025}
224
+ }
225
+ ```
226
+
227
+ ## License
228
+ This work is licensed under the Apache 2.0 License.
assets/brand.png ADDED

Git LFS Details

  • SHA256: bb22c8bfcc06439e8543cf1f1d3954bf5f306d717a602bb765e3d25d300f25e5
  • Pointer size: 131 Bytes
  • Size of remote file: 337 kB
assets/efficiency.png ADDED

Git LFS Details

  • SHA256: 832a819af7b6585749e33e702c8605f8523a3cd78d199f1b589a7033b3b127c7
  • Pointer size: 131 Bytes
  • Size of remote file: 300 kB
assets/examples_edit.png ADDED

Git LFS Details

  • SHA256: 484a5f8afffcb61533acb64cbfb9a841bc31941120887774c16f8348bbc5143d
  • Pointer size: 132 Bytes
  • Size of remote file: 5.68 MB
assets/examples_subject.png ADDED

Git LFS Details

  • SHA256: 07f706c1e3a9c023918738b43e0a7611ebbb4dad3bf0535eb7bacbfd85081c81
  • Pointer size: 132 Bytes
  • Size of remote file: 7.77 MB
assets/teaser.jpg ADDED

Git LFS Details

  • SHA256: 77e7a964152438345d06e77159a4eef42265be3543ee03494ef7e92c312bf566
  • Pointer size: 132 Bytes
  • Size of remote file: 7.2 MB
assets/teaser.png ADDED

Git LFS Details

  • SHA256: 2a9dc726bd62baeba481ad3ad9f51b1582ab3980b5db3c5c944c131359fbcdd9
  • Pointer size: 132 Bytes
  • Size of remote file: 4.44 MB
mllm/config.json ADDED
@@ -0,0 +1,65 @@
1
+ {
2
+ "architectures": [
3
+ "Qwen2_5_VLForConditionalGeneration"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 151643,
7
+ "eos_token_id": 151645,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 2048,
10
+ "image_token_id": 151655,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 11008,
13
+ "max_position_embeddings": 128000,
14
+ "max_window_layers": 70,
15
+ "model_type": "qwen2_5_vl",
16
+ "num_attention_heads": 16,
17
+ "num_hidden_layers": 36,
18
+ "num_key_value_heads": 2,
19
+ "rms_norm_eps": 1e-06,
20
+ "rope_scaling": {
21
+ "mrope_section": [
22
+ 16,
23
+ 24,
24
+ 24
25
+ ],
26
+ "rope_type": "default",
27
+ "type": "default"
28
+ },
29
+ "rope_theta": 1000000.0,
30
+ "sliding_window": 32768,
31
+ "tie_word_embeddings": true,
32
+ "torch_dtype": "float32",
33
+ "transformers_version": "4.51.3",
34
+ "use_cache": true,
35
+ "use_sliding_window": false,
36
+ "video_token_id": 151656,
37
+ "vision_config": {
38
+ "depth": 32,
39
+ "fullatt_block_indexes": [
40
+ 7,
41
+ 15,
42
+ 23,
43
+ 31
44
+ ],
45
+ "hidden_act": "silu",
46
+ "hidden_size": 1280,
47
+ "in_channels": 3,
48
+ "in_chans": 3,
49
+ "intermediate_size": 3420,
50
+ "model_type": "qwen2_5_vl",
51
+ "num_heads": 16,
52
+ "out_hidden_size": 2048,
53
+ "patch_size": 14,
54
+ "spatial_merge_size": 2,
55
+ "spatial_patch_size": 14,
56
+ "temporal_patch_size": 2,
57
+ "tokens_per_second": 2,
58
+ "torch_dtype": "float32",
59
+ "window_size": 112
60
+ },
61
+ "vision_end_token_id": 151653,
62
+ "vision_start_token_id": 151652,
63
+ "vision_token_id": 151654,
64
+ "vocab_size": 151936
65
+ }
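This config describes a Qwen2.5-VL backbone stored in standard transformers format, so the `mllm/` weights and `mllm_processor/` can be loaded on their own; the `generation_config.json` below is picked up automatically by `from_pretrained`. A minimal sketch, assuming a local snapshot of this repository at the placeholder path `./Macro-OmniGen2`:

```python
# Hedged sketch: load only the multimodal LLM component with transformers.
# "./Macro-OmniGen2" is a placeholder path to a local snapshot of this repository.
import torch
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "./Macro-OmniGen2/mllm", torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained("./Macro-OmniGen2/mllm_processor")
```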
mllm/generation_config.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "_attn_implementation": "flash_attention_2",
3
+ "bos_token_id": 151643,
4
+ "do_sample": true,
5
+ "eos_token_id": [
6
+ 151645,
7
+ 151643
8
+ ],
9
+ "pad_token_id": 151643,
10
+ "repetition_penalty": 1.05,
11
+ "temperature": 0.1,
12
+ "top_k": 1,
13
+ "top_p": 0.001,
14
+ "transformers_version": "4.51.3"
15
+ }
mllm/model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bfe5503539cf9335017bca96cd4409cec234de71af6bbe9a6035c0952e9319c2
3
+ size 4972304384
mllm/model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09b6f098d029dd4fadcc032d2d9da1914204d098c04a5a38f19bba23beb3039c
3
+ size 4932949248
mllm/model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa99a4bf5f586e0f6aa2e941a66283afc98df2a8c726496f717ff7939ec70651
3
+ size 4932949336
mllm/model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9bdab213ea510b2de9805ae054076d804fe578eb052460ed7988c8cc3aee114
3
+ size 180380208
mllm/model.safetensors.index.json ADDED
@@ -0,0 +1,831 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 15018491904
4
+ },
5
+ "weight_map": {
6
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
7
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
8
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
18
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
19
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
25
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
26
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
27
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
28
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
29
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
30
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
31
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
32
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
33
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
37
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
40
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
42
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
43
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
49
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
50
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
52
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
53
+ "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
54
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
61
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
64
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
66
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
67
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
73
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
74
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
75
+ "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
76
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
77
+ "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
78
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
80
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
82
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
83
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
85
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
86
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
87
+ "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
88
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
89
+ "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
90
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
91
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
92
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
93
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
94
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
95
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
96
+ "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
97
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
98
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
99
+ "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
100
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
101
+ "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
102
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
103
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
104
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
105
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
106
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
107
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
108
+ "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
109
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
110
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
111
+ "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
112
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
113
+ "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
114
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
115
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
116
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
117
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
118
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
119
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
120
+ "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
121
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
122
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
123
+ "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
124
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
125
+ "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
126
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
127
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
128
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
129
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
130
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
131
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
132
+ "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
133
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
134
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
135
+ "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
136
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
137
+ "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
138
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
139
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
140
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
141
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
142
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
143
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
144
+ "model.layers.19.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
145
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
146
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
147
+ "model.layers.19.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
148
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
149
+ "model.layers.19.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
150
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
151
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
152
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
153
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
154
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
155
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
156
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
157
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
158
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
159
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
160
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
161
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
162
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
163
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
164
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
165
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
166
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
168
+ "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
169
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
170
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
171
+ "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
172
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
173
+ "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
174
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
175
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
176
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
178
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
179
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
180
+ "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
181
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
182
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
183
+ "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
184
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
185
+ "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
186
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
187
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
188
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
189
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
190
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
191
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
192
+ "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
193
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
194
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
195
+ "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
196
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
197
+ "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
198
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
199
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
200
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
201
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
202
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
203
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
204
+ "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
205
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
206
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
207
+ "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
208
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
209
+ "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
210
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
211
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
212
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
213
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
214
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
215
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
216
+ "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
217
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
218
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
219
+ "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
220
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
221
+ "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
222
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
223
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
224
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
225
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
226
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
227
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
228
+ "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
229
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
230
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
231
+ "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
232
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
233
+ "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
234
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
235
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
236
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
237
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
238
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
239
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
240
+ "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
241
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
242
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
243
+ "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
244
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
245
+ "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
246
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
247
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
248
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
249
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
250
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
251
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
252
+ "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
253
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
254
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
255
+ "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
256
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
257
+ "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
258
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
259
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
260
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
261
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
262
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
263
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
264
+ "model.layers.28.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
265
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
266
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
267
+ "model.layers.28.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
268
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
269
+ "model.layers.28.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
270
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
271
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
272
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
273
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
274
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
275
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
276
+ "model.layers.29.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
277
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
278
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
279
+ "model.layers.29.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
280
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
281
+ "model.layers.29.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
282
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
283
+ "model.layers.3.input_layernorm.weight": "model-00002-of-00004.safetensors",
284
+ "model.layers.3.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
285
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
286
+ "model.layers.3.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
287
+ "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
288
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
289
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
290
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
291
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
292
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
293
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
294
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
295
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
296
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
297
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
298
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
299
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
300
+ "model.layers.30.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
301
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
302
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
303
+ "model.layers.30.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
304
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
305
+ "model.layers.30.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
306
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
307
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
308
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
309
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
310
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
311
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
312
+ "model.layers.31.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
313
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
314
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
315
+ "model.layers.31.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
316
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
317
+ "model.layers.31.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
318
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
319
+ "model.layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors",
320
+ "model.layers.32.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
321
+ "model.layers.32.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
322
+ "model.layers.32.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
323
+ "model.layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
324
+ "model.layers.32.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
325
+ "model.layers.32.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
326
+ "model.layers.32.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
327
+ "model.layers.32.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
328
+ "model.layers.32.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
329
+ "model.layers.32.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
330
+ "model.layers.32.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
331
+ "model.layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors",
332
+ "model.layers.33.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
333
+ "model.layers.33.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
334
+ "model.layers.33.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
335
+ "model.layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
336
+ "model.layers.33.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
337
+ "model.layers.33.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
338
+ "model.layers.33.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
339
+ "model.layers.33.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
340
+ "model.layers.33.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
341
+ "model.layers.33.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
342
+ "model.layers.33.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
343
+ "model.layers.34.input_layernorm.weight": "model-00003-of-00004.safetensors",
344
+ "model.layers.34.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
345
+ "model.layers.34.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
346
+ "model.layers.34.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
347
+ "model.layers.34.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
348
+ "model.layers.34.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
349
+ "model.layers.34.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
350
+ "model.layers.34.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
351
+ "model.layers.34.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
352
+ "model.layers.34.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
353
+ "model.layers.34.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
354
+ "model.layers.34.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
355
+ "model.layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors",
356
+ "model.layers.35.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
357
+ "model.layers.35.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
358
+ "model.layers.35.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
359
+ "model.layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
360
+ "model.layers.35.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
361
+ "model.layers.35.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
362
+ "model.layers.35.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
363
+ "model.layers.35.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
364
+ "model.layers.35.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
365
+ "model.layers.35.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
366
+ "model.layers.35.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
367
+ "model.layers.4.input_layernorm.weight": "model-00002-of-00004.safetensors",
368
+ "model.layers.4.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
369
+ "model.layers.4.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
370
+ "model.layers.4.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
371
+ "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
372
+ "model.layers.4.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
373
+ "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
374
+ "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
375
+ "model.layers.4.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
376
+ "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
377
+ "model.layers.4.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
378
+ "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
379
+ "model.layers.5.input_layernorm.weight": "model-00002-of-00004.safetensors",
380
+ "model.layers.5.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
381
+ "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
382
+ "model.layers.5.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
383
+ "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
384
+ "model.layers.5.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
385
+ "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
386
+ "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
387
+ "model.layers.5.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
388
+ "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
389
+ "model.layers.5.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
390
+ "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
391
+ "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
392
+ "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
393
+ "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
394
+ "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
395
+ "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
396
+ "model.layers.6.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
397
+ "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
398
+ "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
399
+ "model.layers.6.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
400
+ "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
401
+ "model.layers.6.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
402
+ "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
403
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
404
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
405
+ "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
406
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
407
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
408
+ "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
409
+ "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
410
+ "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
411
+ "model.layers.7.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
412
+ "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
413
+ "model.layers.7.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
414
+ "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
415
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
416
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
417
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
418
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
419
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
420
+ "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
421
+ "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
422
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
423
+ "model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
424
+ "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
425
+ "model.layers.8.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
426
+ "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
427
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
428
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
429
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
430
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
431
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
432
+ "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
433
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
434
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
435
+ "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
436
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
437
+ "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
438
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
439
+ "model.norm.weight": "model-00004-of-00004.safetensors",
440
+ "visual.blocks.0.attn.proj.bias": "model-00001-of-00004.safetensors",
441
+ "visual.blocks.0.attn.proj.weight": "model-00001-of-00004.safetensors",
442
+ "visual.blocks.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
443
+ "visual.blocks.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
444
+ "visual.blocks.0.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
445
+ "visual.blocks.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
446
+ "visual.blocks.0.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
447
+ "visual.blocks.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
448
+ "visual.blocks.0.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
449
+ "visual.blocks.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
450
+ "visual.blocks.0.norm1.weight": "model-00001-of-00004.safetensors",
451
+ "visual.blocks.0.norm2.weight": "model-00001-of-00004.safetensors",
452
+ "visual.blocks.1.attn.proj.bias": "model-00001-of-00004.safetensors",
453
+ "visual.blocks.1.attn.proj.weight": "model-00001-of-00004.safetensors",
454
+ "visual.blocks.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
455
+ "visual.blocks.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
456
+ "visual.blocks.1.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
457
+ "visual.blocks.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
458
+ "visual.blocks.1.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
459
+ "visual.blocks.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
460
+ "visual.blocks.1.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
461
+ "visual.blocks.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
462
+ "visual.blocks.1.norm1.weight": "model-00001-of-00004.safetensors",
463
+ "visual.blocks.1.norm2.weight": "model-00001-of-00004.safetensors",
464
+ "visual.blocks.10.attn.proj.bias": "model-00001-of-00004.safetensors",
465
+ "visual.blocks.10.attn.proj.weight": "model-00001-of-00004.safetensors",
466
+ "visual.blocks.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
467
+ "visual.blocks.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
468
+ "visual.blocks.10.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
469
+ "visual.blocks.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
470
+ "visual.blocks.10.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
471
+ "visual.blocks.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
472
+ "visual.blocks.10.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
473
+ "visual.blocks.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
474
+ "visual.blocks.10.norm1.weight": "model-00001-of-00004.safetensors",
475
+ "visual.blocks.10.norm2.weight": "model-00001-of-00004.safetensors",
476
+ "visual.blocks.11.attn.proj.bias": "model-00001-of-00004.safetensors",
477
+ "visual.blocks.11.attn.proj.weight": "model-00001-of-00004.safetensors",
478
+ "visual.blocks.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
479
+ "visual.blocks.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
480
+ "visual.blocks.11.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
481
+ "visual.blocks.11.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
482
+ "visual.blocks.11.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
483
+ "visual.blocks.11.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
484
+ "visual.blocks.11.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
485
+ "visual.blocks.11.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
486
+ "visual.blocks.11.norm1.weight": "model-00001-of-00004.safetensors",
487
+ "visual.blocks.11.norm2.weight": "model-00001-of-00004.safetensors",
488
+ "visual.blocks.12.attn.proj.bias": "model-00001-of-00004.safetensors",
489
+ "visual.blocks.12.attn.proj.weight": "model-00001-of-00004.safetensors",
490
+ "visual.blocks.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
491
+ "visual.blocks.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
492
+ "visual.blocks.12.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
493
+ "visual.blocks.12.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
494
+ "visual.blocks.12.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
495
+ "visual.blocks.12.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
496
+ "visual.blocks.12.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
497
+ "visual.blocks.12.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
498
+ "visual.blocks.12.norm1.weight": "model-00001-of-00004.safetensors",
499
+ "visual.blocks.12.norm2.weight": "model-00001-of-00004.safetensors",
500
+ "visual.blocks.13.attn.proj.bias": "model-00001-of-00004.safetensors",
501
+ "visual.blocks.13.attn.proj.weight": "model-00001-of-00004.safetensors",
502
+ "visual.blocks.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
503
+ "visual.blocks.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
504
+ "visual.blocks.13.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
505
+ "visual.blocks.13.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
506
+ "visual.blocks.13.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
507
+ "visual.blocks.13.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
508
+ "visual.blocks.13.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
509
+ "visual.blocks.13.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
510
+ "visual.blocks.13.norm1.weight": "model-00001-of-00004.safetensors",
511
+ "visual.blocks.13.norm2.weight": "model-00001-of-00004.safetensors",
512
+ "visual.blocks.14.attn.proj.bias": "model-00001-of-00004.safetensors",
513
+ "visual.blocks.14.attn.proj.weight": "model-00001-of-00004.safetensors",
514
+ "visual.blocks.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
515
+ "visual.blocks.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
516
+ "visual.blocks.14.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
517
+ "visual.blocks.14.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
518
+ "visual.blocks.14.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
519
+ "visual.blocks.14.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
520
+ "visual.blocks.14.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
521
+ "visual.blocks.14.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
522
+ "visual.blocks.14.norm1.weight": "model-00001-of-00004.safetensors",
523
+ "visual.blocks.14.norm2.weight": "model-00001-of-00004.safetensors",
524
+ "visual.blocks.15.attn.proj.bias": "model-00001-of-00004.safetensors",
525
+ "visual.blocks.15.attn.proj.weight": "model-00001-of-00004.safetensors",
526
+ "visual.blocks.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
527
+ "visual.blocks.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
528
+ "visual.blocks.15.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
529
+ "visual.blocks.15.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
530
+ "visual.blocks.15.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
531
+ "visual.blocks.15.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
532
+ "visual.blocks.15.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
533
+ "visual.blocks.15.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
534
+ "visual.blocks.15.norm1.weight": "model-00001-of-00004.safetensors",
535
+ "visual.blocks.15.norm2.weight": "model-00001-of-00004.safetensors",
536
+ "visual.blocks.16.attn.proj.bias": "model-00001-of-00004.safetensors",
537
+ "visual.blocks.16.attn.proj.weight": "model-00001-of-00004.safetensors",
538
+ "visual.blocks.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
539
+ "visual.blocks.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
540
+ "visual.blocks.16.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
541
+ "visual.blocks.16.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
542
+ "visual.blocks.16.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
543
+ "visual.blocks.16.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
544
+ "visual.blocks.16.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
545
+ "visual.blocks.16.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
546
+ "visual.blocks.16.norm1.weight": "model-00001-of-00004.safetensors",
547
+ "visual.blocks.16.norm2.weight": "model-00001-of-00004.safetensors",
548
+ "visual.blocks.17.attn.proj.bias": "model-00001-of-00004.safetensors",
549
+ "visual.blocks.17.attn.proj.weight": "model-00001-of-00004.safetensors",
550
+ "visual.blocks.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
551
+ "visual.blocks.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
552
+ "visual.blocks.17.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
553
+ "visual.blocks.17.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
554
+ "visual.blocks.17.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
555
+ "visual.blocks.17.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
556
+ "visual.blocks.17.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
557
+ "visual.blocks.17.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
558
+ "visual.blocks.17.norm1.weight": "model-00001-of-00004.safetensors",
559
+ "visual.blocks.17.norm2.weight": "model-00001-of-00004.safetensors",
560
+ "visual.blocks.18.attn.proj.bias": "model-00001-of-00004.safetensors",
561
+ "visual.blocks.18.attn.proj.weight": "model-00001-of-00004.safetensors",
562
+ "visual.blocks.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
563
+ "visual.blocks.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
564
+ "visual.blocks.18.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
565
+ "visual.blocks.18.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
566
+ "visual.blocks.18.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
567
+ "visual.blocks.18.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
568
+ "visual.blocks.18.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
569
+ "visual.blocks.18.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
570
+ "visual.blocks.18.norm1.weight": "model-00001-of-00004.safetensors",
571
+ "visual.blocks.18.norm2.weight": "model-00001-of-00004.safetensors",
572
+ "visual.blocks.19.attn.proj.bias": "model-00001-of-00004.safetensors",
573
+ "visual.blocks.19.attn.proj.weight": "model-00001-of-00004.safetensors",
574
+ "visual.blocks.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
575
+ "visual.blocks.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
576
+ "visual.blocks.19.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
577
+ "visual.blocks.19.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
578
+ "visual.blocks.19.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
579
+ "visual.blocks.19.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
580
+ "visual.blocks.19.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
581
+ "visual.blocks.19.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
582
+ "visual.blocks.19.norm1.weight": "model-00001-of-00004.safetensors",
583
+ "visual.blocks.19.norm2.weight": "model-00001-of-00004.safetensors",
584
+ "visual.blocks.2.attn.proj.bias": "model-00001-of-00004.safetensors",
585
+ "visual.blocks.2.attn.proj.weight": "model-00001-of-00004.safetensors",
586
+ "visual.blocks.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
587
+ "visual.blocks.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
588
+ "visual.blocks.2.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
589
+ "visual.blocks.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
590
+ "visual.blocks.2.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
591
+ "visual.blocks.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
592
+ "visual.blocks.2.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
593
+ "visual.blocks.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
594
+ "visual.blocks.2.norm1.weight": "model-00001-of-00004.safetensors",
595
+ "visual.blocks.2.norm2.weight": "model-00001-of-00004.safetensors",
596
+ "visual.blocks.20.attn.proj.bias": "model-00001-of-00004.safetensors",
597
+ "visual.blocks.20.attn.proj.weight": "model-00001-of-00004.safetensors",
598
+ "visual.blocks.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
599
+ "visual.blocks.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
600
+ "visual.blocks.20.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
601
+ "visual.blocks.20.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
602
+ "visual.blocks.20.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
603
+ "visual.blocks.20.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
604
+ "visual.blocks.20.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
605
+ "visual.blocks.20.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
606
+ "visual.blocks.20.norm1.weight": "model-00001-of-00004.safetensors",
607
+ "visual.blocks.20.norm2.weight": "model-00001-of-00004.safetensors",
608
+ "visual.blocks.21.attn.proj.bias": "model-00001-of-00004.safetensors",
609
+ "visual.blocks.21.attn.proj.weight": "model-00001-of-00004.safetensors",
610
+ "visual.blocks.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
611
+ "visual.blocks.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
612
+ "visual.blocks.21.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
613
+ "visual.blocks.21.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
614
+ "visual.blocks.21.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
615
+ "visual.blocks.21.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
616
+ "visual.blocks.21.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
617
+ "visual.blocks.21.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
618
+ "visual.blocks.21.norm1.weight": "model-00001-of-00004.safetensors",
619
+ "visual.blocks.21.norm2.weight": "model-00001-of-00004.safetensors",
620
+ "visual.blocks.22.attn.proj.bias": "model-00001-of-00004.safetensors",
621
+ "visual.blocks.22.attn.proj.weight": "model-00001-of-00004.safetensors",
622
+ "visual.blocks.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
623
+ "visual.blocks.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
624
+ "visual.blocks.22.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
625
+ "visual.blocks.22.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
626
+ "visual.blocks.22.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
627
+ "visual.blocks.22.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
628
+ "visual.blocks.22.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
629
+ "visual.blocks.22.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
630
+ "visual.blocks.22.norm1.weight": "model-00001-of-00004.safetensors",
631
+ "visual.blocks.22.norm2.weight": "model-00001-of-00004.safetensors",
632
+ "visual.blocks.23.attn.proj.bias": "model-00001-of-00004.safetensors",
633
+ "visual.blocks.23.attn.proj.weight": "model-00001-of-00004.safetensors",
634
+ "visual.blocks.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
635
+ "visual.blocks.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
636
+ "visual.blocks.23.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
637
+ "visual.blocks.23.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
638
+ "visual.blocks.23.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
639
+ "visual.blocks.23.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
640
+ "visual.blocks.23.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
641
+ "visual.blocks.23.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
642
+ "visual.blocks.23.norm1.weight": "model-00001-of-00004.safetensors",
643
+ "visual.blocks.23.norm2.weight": "model-00001-of-00004.safetensors",
644
+ "visual.blocks.24.attn.proj.bias": "model-00001-of-00004.safetensors",
645
+ "visual.blocks.24.attn.proj.weight": "model-00001-of-00004.safetensors",
646
+ "visual.blocks.24.attn.qkv.bias": "model-00001-of-00004.safetensors",
647
+ "visual.blocks.24.attn.qkv.weight": "model-00001-of-00004.safetensors",
648
+ "visual.blocks.24.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
649
+ "visual.blocks.24.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
650
+ "visual.blocks.24.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
651
+ "visual.blocks.24.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
652
+ "visual.blocks.24.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
653
+ "visual.blocks.24.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
654
+ "visual.blocks.24.norm1.weight": "model-00001-of-00004.safetensors",
655
+ "visual.blocks.24.norm2.weight": "model-00001-of-00004.safetensors",
656
+ "visual.blocks.25.attn.proj.bias": "model-00001-of-00004.safetensors",
657
+ "visual.blocks.25.attn.proj.weight": "model-00001-of-00004.safetensors",
658
+ "visual.blocks.25.attn.qkv.bias": "model-00001-of-00004.safetensors",
659
+ "visual.blocks.25.attn.qkv.weight": "model-00001-of-00004.safetensors",
660
+ "visual.blocks.25.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
661
+ "visual.blocks.25.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
662
+ "visual.blocks.25.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
663
+ "visual.blocks.25.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
664
+ "visual.blocks.25.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
665
+ "visual.blocks.25.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
666
+ "visual.blocks.25.norm1.weight": "model-00001-of-00004.safetensors",
667
+ "visual.blocks.25.norm2.weight": "model-00001-of-00004.safetensors",
668
+ "visual.blocks.26.attn.proj.bias": "model-00001-of-00004.safetensors",
669
+ "visual.blocks.26.attn.proj.weight": "model-00001-of-00004.safetensors",
670
+ "visual.blocks.26.attn.qkv.bias": "model-00001-of-00004.safetensors",
671
+ "visual.blocks.26.attn.qkv.weight": "model-00001-of-00004.safetensors",
672
+ "visual.blocks.26.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
673
+ "visual.blocks.26.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
674
+ "visual.blocks.26.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
675
+ "visual.blocks.26.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
676
+ "visual.blocks.26.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
677
+ "visual.blocks.26.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
678
+ "visual.blocks.26.norm1.weight": "model-00001-of-00004.safetensors",
679
+ "visual.blocks.26.norm2.weight": "model-00001-of-00004.safetensors",
680
+ "visual.blocks.27.attn.proj.bias": "model-00001-of-00004.safetensors",
681
+ "visual.blocks.27.attn.proj.weight": "model-00001-of-00004.safetensors",
682
+ "visual.blocks.27.attn.qkv.bias": "model-00001-of-00004.safetensors",
683
+ "visual.blocks.27.attn.qkv.weight": "model-00001-of-00004.safetensors",
684
+ "visual.blocks.27.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
685
+ "visual.blocks.27.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
686
+ "visual.blocks.27.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
687
+ "visual.blocks.27.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
688
+ "visual.blocks.27.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
689
+ "visual.blocks.27.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
690
+ "visual.blocks.27.norm1.weight": "model-00001-of-00004.safetensors",
691
+ "visual.blocks.27.norm2.weight": "model-00001-of-00004.safetensors",
692
+ "visual.blocks.28.attn.proj.bias": "model-00001-of-00004.safetensors",
693
+ "visual.blocks.28.attn.proj.weight": "model-00001-of-00004.safetensors",
694
+ "visual.blocks.28.attn.qkv.bias": "model-00001-of-00004.safetensors",
695
+ "visual.blocks.28.attn.qkv.weight": "model-00001-of-00004.safetensors",
696
+ "visual.blocks.28.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
697
+ "visual.blocks.28.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
698
+ "visual.blocks.28.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
699
+ "visual.blocks.28.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
700
+ "visual.blocks.28.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
701
+ "visual.blocks.28.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
702
+ "visual.blocks.28.norm1.weight": "model-00001-of-00004.safetensors",
703
+ "visual.blocks.28.norm2.weight": "model-00001-of-00004.safetensors",
704
+ "visual.blocks.29.attn.proj.bias": "model-00001-of-00004.safetensors",
705
+ "visual.blocks.29.attn.proj.weight": "model-00001-of-00004.safetensors",
706
+ "visual.blocks.29.attn.qkv.bias": "model-00001-of-00004.safetensors",
707
+ "visual.blocks.29.attn.qkv.weight": "model-00001-of-00004.safetensors",
708
+ "visual.blocks.29.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
709
+ "visual.blocks.29.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
710
+ "visual.blocks.29.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
711
+ "visual.blocks.29.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
712
+ "visual.blocks.29.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
713
+ "visual.blocks.29.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
714
+ "visual.blocks.29.norm1.weight": "model-00001-of-00004.safetensors",
715
+ "visual.blocks.29.norm2.weight": "model-00001-of-00004.safetensors",
716
+ "visual.blocks.3.attn.proj.bias": "model-00001-of-00004.safetensors",
717
+ "visual.blocks.3.attn.proj.weight": "model-00001-of-00004.safetensors",
718
+ "visual.blocks.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
719
+ "visual.blocks.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
720
+ "visual.blocks.3.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
721
+ "visual.blocks.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
722
+ "visual.blocks.3.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
723
+ "visual.blocks.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
724
+ "visual.blocks.3.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
725
+ "visual.blocks.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
726
+ "visual.blocks.3.norm1.weight": "model-00001-of-00004.safetensors",
727
+ "visual.blocks.3.norm2.weight": "model-00001-of-00004.safetensors",
728
+ "visual.blocks.30.attn.proj.bias": "model-00001-of-00004.safetensors",
729
+ "visual.blocks.30.attn.proj.weight": "model-00001-of-00004.safetensors",
730
+ "visual.blocks.30.attn.qkv.bias": "model-00001-of-00004.safetensors",
731
+ "visual.blocks.30.attn.qkv.weight": "model-00001-of-00004.safetensors",
732
+ "visual.blocks.30.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
733
+ "visual.blocks.30.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
734
+ "visual.blocks.30.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
735
+ "visual.blocks.30.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
736
+ "visual.blocks.30.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
737
+ "visual.blocks.30.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
738
+ "visual.blocks.30.norm1.weight": "model-00001-of-00004.safetensors",
739
+ "visual.blocks.30.norm2.weight": "model-00001-of-00004.safetensors",
740
+ "visual.blocks.31.attn.proj.bias": "model-00001-of-00004.safetensors",
741
+ "visual.blocks.31.attn.proj.weight": "model-00001-of-00004.safetensors",
742
+ "visual.blocks.31.attn.qkv.bias": "model-00001-of-00004.safetensors",
743
+ "visual.blocks.31.attn.qkv.weight": "model-00001-of-00004.safetensors",
744
+ "visual.blocks.31.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
745
+ "visual.blocks.31.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
746
+ "visual.blocks.31.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
747
+ "visual.blocks.31.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
748
+ "visual.blocks.31.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
749
+ "visual.blocks.31.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
750
+ "visual.blocks.31.norm1.weight": "model-00001-of-00004.safetensors",
751
+ "visual.blocks.31.norm2.weight": "model-00001-of-00004.safetensors",
752
+ "visual.blocks.4.attn.proj.bias": "model-00001-of-00004.safetensors",
753
+ "visual.blocks.4.attn.proj.weight": "model-00001-of-00004.safetensors",
754
+ "visual.blocks.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
755
+ "visual.blocks.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
756
+ "visual.blocks.4.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
757
+ "visual.blocks.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
758
+ "visual.blocks.4.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
759
+ "visual.blocks.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
760
+ "visual.blocks.4.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
761
+ "visual.blocks.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
762
+ "visual.blocks.4.norm1.weight": "model-00001-of-00004.safetensors",
763
+ "visual.blocks.4.norm2.weight": "model-00001-of-00004.safetensors",
764
+ "visual.blocks.5.attn.proj.bias": "model-00001-of-00004.safetensors",
765
+ "visual.blocks.5.attn.proj.weight": "model-00001-of-00004.safetensors",
766
+ "visual.blocks.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
767
+ "visual.blocks.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
768
+ "visual.blocks.5.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
769
+ "visual.blocks.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
770
+ "visual.blocks.5.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
771
+ "visual.blocks.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
772
+ "visual.blocks.5.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
773
+ "visual.blocks.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
774
+ "visual.blocks.5.norm1.weight": "model-00001-of-00004.safetensors",
775
+ "visual.blocks.5.norm2.weight": "model-00001-of-00004.safetensors",
776
+ "visual.blocks.6.attn.proj.bias": "model-00001-of-00004.safetensors",
777
+ "visual.blocks.6.attn.proj.weight": "model-00001-of-00004.safetensors",
778
+ "visual.blocks.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
779
+ "visual.blocks.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
780
+ "visual.blocks.6.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
781
+ "visual.blocks.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
782
+ "visual.blocks.6.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
783
+ "visual.blocks.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
784
+ "visual.blocks.6.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
785
+ "visual.blocks.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
786
+ "visual.blocks.6.norm1.weight": "model-00001-of-00004.safetensors",
787
+ "visual.blocks.6.norm2.weight": "model-00001-of-00004.safetensors",
788
+ "visual.blocks.7.attn.proj.bias": "model-00001-of-00004.safetensors",
789
+ "visual.blocks.7.attn.proj.weight": "model-00001-of-00004.safetensors",
790
+ "visual.blocks.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
791
+ "visual.blocks.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
792
+ "visual.blocks.7.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
793
+ "visual.blocks.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
794
+ "visual.blocks.7.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
795
+ "visual.blocks.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
796
+ "visual.blocks.7.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
797
+ "visual.blocks.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
798
+ "visual.blocks.7.norm1.weight": "model-00001-of-00004.safetensors",
799
+ "visual.blocks.7.norm2.weight": "model-00001-of-00004.safetensors",
800
+ "visual.blocks.8.attn.proj.bias": "model-00001-of-00004.safetensors",
801
+ "visual.blocks.8.attn.proj.weight": "model-00001-of-00004.safetensors",
802
+ "visual.blocks.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
803
+ "visual.blocks.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
804
+ "visual.blocks.8.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
805
+ "visual.blocks.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
806
+ "visual.blocks.8.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
807
+ "visual.blocks.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
808
+ "visual.blocks.8.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
809
+ "visual.blocks.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
810
+ "visual.blocks.8.norm1.weight": "model-00001-of-00004.safetensors",
811
+ "visual.blocks.8.norm2.weight": "model-00001-of-00004.safetensors",
812
+ "visual.blocks.9.attn.proj.bias": "model-00001-of-00004.safetensors",
813
+ "visual.blocks.9.attn.proj.weight": "model-00001-of-00004.safetensors",
814
+ "visual.blocks.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
815
+ "visual.blocks.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
816
+ "visual.blocks.9.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
817
+ "visual.blocks.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
818
+ "visual.blocks.9.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
819
+ "visual.blocks.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
820
+ "visual.blocks.9.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
821
+ "visual.blocks.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
822
+ "visual.blocks.9.norm1.weight": "model-00001-of-00004.safetensors",
823
+ "visual.blocks.9.norm2.weight": "model-00001-of-00004.safetensors",
824
+ "visual.merger.ln_q.weight": "model-00001-of-00004.safetensors",
825
+ "visual.merger.mlp.0.bias": "model-00001-of-00004.safetensors",
826
+ "visual.merger.mlp.0.weight": "model-00001-of-00004.safetensors",
827
+ "visual.merger.mlp.2.bias": "model-00001-of-00004.safetensors",
828
+ "visual.merger.mlp.2.weight": "model-00001-of-00004.safetensors",
829
+ "visual.patch_embed.proj.weight": "model-00001-of-00004.safetensors"
830
+ }
831
+ }
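For reference: the weight_map above assigns every parameter of the Qwen2.5-VL MLLM to one of the four safetensors shards. A minimal sketch of how such an index is typically consumed, assuming a local copy of this repo and the `safetensors` package (the tensor name is just one entry picked from the map above):

```python
import json
from safetensors import safe_open

with open("mllm/model.safetensors.index.json") as fp:
    index = json.load(fp)

name = "visual.patch_embed.proj.weight"
shard = index["weight_map"][name]          # e.g. "model-00001-of-00004.safetensors"
with safe_open(f"mllm/{shard}", framework="pt", device="cpu") as shard_file:
    tensor = shard_file.get_tensor(name)   # loads only this tensor from the shard
print(name, tuple(tensor.shape))
```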
mllm_processor/added_tokens.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endofimg|>": 151666,
7
+ "<|endofmeta|>": 151668,
8
+ "<|endoftext|>": 151643,
9
+ "<|file_sep|>": 151664,
10
+ "<|fim_middle|>": 151660,
11
+ "<|fim_pad|>": 151662,
12
+ "<|fim_prefix|>": 151659,
13
+ "<|fim_suffix|>": 151661,
14
+ "<|im_end|>": 151645,
15
+ "<|im_start|>": 151644,
16
+ "<|image_pad|>": 151655,
17
+ "<|img|>": 151665,
18
+ "<|meta|>": 151667,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
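added_tokens.json extends the base Qwen2.5-VL vocabulary with OmniGen2's image and metadata markers (`<|img|>`, `<|endofimg|>`, `<|meta|>`, `<|endofmeta|>`) on top of the stock Qwen special tokens. A quick sanity check that a loaded tokenizer resolves them to the IDs listed above, assuming a local clone of this repo:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./mllm_processor")
for token in ["<|img|>", "<|endofimg|>", "<|meta|>", "<|endofmeta|>"]:
    print(token, tok.convert_tokens_to_ids(token))
# expected IDs per added_tokens.json: 151665, 151666, 151667, 151668
```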
mllm_processor/chat_template.json ADDED
@@ -0,0 +1,3 @@
1
+ {
2
+ "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
3
+ }
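chat_template.json carries the processor-level Jinja template: images are wrapped in `<|vision_start|><|image_pad|><|vision_end|>`, videos in the video pad, and every turn in `<|im_start|>`/`<|im_end|>`. A rendering sketch (the local path is an assumption):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("./mllm_processor")
messages = [
    {"role": "user",
     "content": [{"type": "image"},
                 {"type": "text", "text": "Describe this picture."}]},
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
# Expected shape of the rendered prompt (line breaks added here for readability):
# <|im_start|>system\nYou are a helpful assistant.<|im_end|>\n
# <|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe this picture.<|im_end|>\n
# <|im_start|>assistant\n
```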
mllm_processor/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
mllm_processor/preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "do_convert_rgb": true,
3
+ "do_normalize": true,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.48145466,
8
+ 0.4578275,
9
+ 0.40821073
10
+ ],
11
+ "image_processor_type": "Qwen2VLImageProcessor",
12
+ "image_std": [
13
+ 0.26862954,
14
+ 0.26130258,
15
+ 0.27577711
16
+ ],
17
+ "max_pixels": 12845056,
18
+ "merge_size": 2,
19
+ "min_pixels": 3136,
20
+ "patch_size": 14,
21
+ "processor_class": "Qwen2_5_VLProcessor",
22
+ "resample": 3,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "size": {
25
+ "longest_edge": 12845056,
26
+ "shortest_edge": 3136
27
+ },
28
+ "temporal_patch_size": 2
29
+ }
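preprocessor_config.json mirrors the Qwen2-VL image pipeline: pixels are rescaled to [0, 1], normalized with the CLIP mean/std above, and images are resized so the pixel count stays within [min_pixels, max_pixels] with both sides divisible by patch_size * merge_size = 28. A back-of-the-envelope sketch of the resulting visual token count (the image size is chosen for illustration):

```python
patch_size, merge_size = 14, 2
min_pixels, max_pixels = 3136, 12845056

h, w = 560, 840                                        # multiples of 28, within the pixel budget
assert min_pixels <= h * w <= max_pixels
vit_patches = (h // patch_size) * (w // patch_size)    # 40 * 60 = 2400 ViT patches
llm_tokens = vit_patches // (merge_size ** 2)          # 600 tokens after 2x2 patch merging
print(vit_patches, llm_tokens)
```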
mllm_processor/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
mllm_processor/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69f12bcc978e3d112e092478b218f0161ef3d4bec08792866c99d29830772f08
3
+ size 11422644
mllm_processor/tokenizer_config.json ADDED
@@ -0,0 +1,241 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<|img|>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": true
188
+ },
189
+ "151666": {
190
+ "content": "<|endofimg|>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": true
196
+ },
197
+ "151667": {
198
+ "content": "<|meta|>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": true
204
+ },
205
+ "151668": {
206
+ "content": "<|endofmeta|>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": true
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
231
+ "clean_up_tokenization_spaces": false,
232
+ "eos_token": "<|im_end|>",
233
+ "errors": "replace",
234
+ "extra_special_tokens": {},
235
+ "model_max_length": 131072,
236
+ "pad_token": "<|endoftext|>",
237
+ "processor_class": "Qwen2_5_VLProcessor",
238
+ "split_special_tokens": false,
239
+ "tokenizer_class": "Qwen2Tokenizer",
240
+ "unk_token": null
241
+ }
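Beyond the added tokens, tokenizer_config.json pins the control tokens the pipeline relies on: `<|im_end|>` as EOS, `<|endoftext|>` as padding, and a 131072-token context window. A small check after loading (local path assumed):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./mllm_processor")
print(tok.eos_token, tok.eos_token_id)   # <|im_end|> 151645
print(tok.pad_token, tok.pad_token_id)   # <|endoftext|> 151643
print(tok.model_max_length)              # 131072
```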
mllm_processor/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
model_index.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "_class_name": "OmniGen2Pipeline",
3
+ "_diffusers_version": "0.33.1",
4
+ "mllm": [
5
+ "transformers",
6
+ "Qwen2_5_VLForConditionalGeneration"
7
+ ],
8
+ "processor": [
9
+ "transformers",
10
+ "Qwen2_5_VLProcessor"
11
+ ],
12
+ "scheduler": [
13
+ "scheduling_flow_match_euler_discrete",
14
+ "FlowMatchEulerDiscreteScheduler"
15
+ ],
16
+ "transformer": [
17
+ "transformer_omnigen2",
18
+ "OmniGen2Transformer2DModel"
19
+ ],
20
+ "vae": [
21
+ "diffusers",
22
+ "AutoencoderKL"
23
+ ]
24
+ }
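model_index.json is the diffusers-style pipeline manifest: each subfolder of this repo is paired with the class that loads it (the Qwen2.5-VL MLLM and processor from transformers, the custom OmniGen2 transformer and flow-matching scheduler from the OmniGen2 codebase, and a standard AutoencoderKL). A loading sketch under the assumption that the OmniGen2 source package is installed; the import path below is a guess, not something defined in this commit:

```python
import torch
# Hypothetical import: OmniGen2Pipeline ships with the OmniGen2 codebase,
# not with diffusers itself, so adjust the module path to your installation.
from omnigen2.pipelines.omnigen2.pipeline_omnigen2 import OmniGen2Pipeline

pipe = OmniGen2Pipeline.from_pretrained(".", torch_dtype=torch.bfloat16)
```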
processor/added_tokens.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endofimg|>": 151666,
7
+ "<|endofmeta|>": 151668,
8
+ "<|endoftext|>": 151643,
9
+ "<|file_sep|>": 151664,
10
+ "<|fim_middle|>": 151660,
11
+ "<|fim_pad|>": 151662,
12
+ "<|fim_prefix|>": 151659,
13
+ "<|fim_suffix|>": 151661,
14
+ "<|im_end|>": 151645,
15
+ "<|im_start|>": 151644,
16
+ "<|image_pad|>": 151655,
17
+ "<|img|>": 151665,
18
+ "<|meta|>": 151667,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
processor/chat_template.json ADDED
@@ -0,0 +1,3 @@
1
+ {
2
+ "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
3
+ }
processor/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
processor/preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "do_convert_rgb": true,
3
+ "do_normalize": true,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.48145466,
8
+ 0.4578275,
9
+ 0.40821073
10
+ ],
11
+ "image_processor_type": "Qwen2VLImageProcessor",
12
+ "image_std": [
13
+ 0.26862954,
14
+ 0.26130258,
15
+ 0.27577711
16
+ ],
17
+ "max_pixels": 12845056,
18
+ "merge_size": 2,
19
+ "min_pixels": 3136,
20
+ "patch_size": 14,
21
+ "processor_class": "Qwen2_5_VLProcessor",
22
+ "resample": 3,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "size": {
25
+ "longest_edge": 12845056,
26
+ "shortest_edge": 3136
27
+ },
28
+ "temporal_patch_size": 2
29
+ }
processor/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
processor/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69f12bcc978e3d112e092478b218f0161ef3d4bec08792866c99d29830772f08
3
+ size 11422644
processor/tokenizer_config.json ADDED
@@ -0,0 +1,241 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<|img|>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": true
188
+ },
189
+ "151666": {
190
+ "content": "<|endofimg|>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": true
196
+ },
197
+ "151667": {
198
+ "content": "<|meta|>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": true
204
+ },
205
+ "151668": {
206
+ "content": "<|endofmeta|>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": true
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
231
+ "clean_up_tokenization_spaces": false,
232
+ "eos_token": "<|im_end|>",
233
+ "errors": "replace",
234
+ "extra_special_tokens": {},
235
+ "model_max_length": 131072,
236
+ "pad_token": "<|endoftext|>",
237
+ "processor_class": "Qwen2_5_VLProcessor",
238
+ "split_special_tokens": false,
239
+ "tokenizer_class": "Qwen2Tokenizer",
240
+ "unk_token": null
241
+ }
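The tokenizer config above extends the Qwen2.5-VL tokenizer with OmniGen2-specific markers (`<|img|>`, `<|endofimg|>`, `<|meta|>`, `<|endofmeta|>`, ids 151665-151668). A minimal sketch of inspecting them; the repository path is a placeholder, and loading through `AutoProcessor` with `subfolder="processor"` is an assumption about how this upload is meant to be consumed:

```python
# Minimal sketch, assuming the files above are loadable with the standard transformers Auto classes.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "path/to/Macro-OmniGen2",   # placeholder: local checkout or hub repo id
    subfolder="processor",
)
tokenizer = processor.tokenizer

# The OmniGen2-specific image / metadata markers registered in tokenizer_config.json.
for token in ["<|img|>", "<|endofimg|>", "<|meta|>", "<|endofmeta|>"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
```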
processor/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
scheduler/__pycache__/scheduling_flow_match_euler_discrete.cpython-310.pyc ADDED
Binary file (7.68 kB). View file
 
scheduler/__pycache__/scheduling_flow_match_euler_discrete.cpython-311.pyc ADDED
Binary file (10.3 kB). View file
 
scheduler/__pycache__/scheduling_flow_match_euler_discrete.cpython-312.pyc ADDED
Binary file (9.95 kB). View file
 
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "_class_name": "FlowMatchEulerDiscreteScheduler",
3
+ "_diffusers_version": "0.33.1",
4
+ "dynamic_time_shift": true,
5
+ "num_train_timesteps": 1000
6
+ }
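This config turns on the `dynamic_time_shift` variant implemented in the file below. A minimal sketch of instantiating the scheduler from the saved config; the import assumes the `scheduler/` module from this repo is on `sys.path`, and the path is a placeholder:

```python
# Minimal sketch, assuming scheduling_flow_match_euler_discrete.py (added below) is importable.
from scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler

scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
    "path/to/Macro-OmniGen2",  # placeholder path
    subfolder="scheduler",
)
print(scheduler.config.dynamic_time_shift)  # True, per the config above
```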
scheduler/scheduling_flow_match_euler_discrete.py ADDED
@@ -0,0 +1,229 @@
1
+ # Copyright 2024 Stability AI, Katherine Crowson and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from dataclasses import dataclass
17
+ from typing import List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+
22
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
23
+ from diffusers.utils import BaseOutput, logging
24
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
25
+
26
+
27
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
28
+
29
+
30
+ @dataclass
31
+ class FlowMatchEulerDiscreteSchedulerOutput(BaseOutput):
32
+ """
33
+ Output class for the scheduler's `step` function output.
34
+
35
+ Args:
36
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
37
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
38
+ denoising loop.
39
+ """
40
+
41
+ prev_sample: torch.FloatTensor
42
+
43
+
44
+ class FlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
45
+ """
46
+ Euler scheduler.
47
+
48
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
49
+ methods the library implements for all schedulers such as loading and saving.
50
+
51
+ Args:
52
+ num_train_timesteps (`int`, defaults to 1000):
53
+ The number of diffusion steps to train the model.
54
+ dynamic_time_shift (`bool`, defaults to `False`):
55
+ Whether to apply a resolution-dependent shift to the inference timestep schedule, computed in
56
+ `set_timesteps` from the number of image tokens.
59
+ """
60
+
61
+ _compatibles = []
62
+ order = 1
63
+
64
+ @register_to_config
65
+ def __init__(
66
+ self,
67
+ num_train_timesteps: int = 1000,
68
+ dynamic_time_shift: bool = False
69
+ ):
70
+ timesteps = torch.linspace(0, 1, num_train_timesteps + 1, dtype=torch.float32)[:-1]
71
+
72
+ self.timesteps = timesteps
73
+
74
+ self._step_index = None
75
+ self._begin_index = None
76
+
77
+ @property
78
+ def step_index(self):
79
+ """
80
+ The index counter for the current timestep. It increases by 1 after each scheduler step.
81
+ """
82
+ return self._step_index
83
+
84
+ @property
85
+ def begin_index(self):
86
+ """
87
+ The index of the first timestep. It should be set from the pipeline with the `set_begin_index` method.
88
+ """
89
+ return self._begin_index
90
+
91
+ # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
92
+ def set_begin_index(self, begin_index: int = 0):
93
+ """
94
+ Sets the begin index for the scheduler. This function should be run from the pipeline before inference.
95
+
96
+ Args:
97
+ begin_index (`int`):
98
+ The begin index for the scheduler.
99
+ """
100
+ self._begin_index = begin_index
101
+
102
+ def index_for_timestep(self, timestep, schedule_timesteps=None):
103
+ if schedule_timesteps is None:
104
+ schedule_timesteps = self._timesteps
105
+
106
+ indices = (schedule_timesteps == timestep).nonzero()
107
+
108
+ # The timestep index that is taken for the **very** first `step`
109
+ # is always the second index (or the last index if there is only 1)
110
+ # This way we can ensure we don't accidentally skip a timestep in
111
+ # case we start in the middle of the denoising schedule (e.g. for image-to-image)
112
+ pos = 1 if len(indices) > 1 else 0
113
+
114
+ return indices[pos].item()
115
+
116
+ # def time_shift(self, mu: float, sigma: float, t: torch.Tensor):
117
+ # return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
118
+
119
+ def set_timesteps(
120
+ self,
121
+ num_inference_steps: int = None,
122
+ device: Union[str, torch.device] = None,
123
+ timesteps: Optional[List[float]] = None,
124
+ num_tokens: Optional[int] = None
125
+ ):
126
+ """
127
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
128
+
129
+ Args:
130
+ num_inference_steps (`int`):
131
+ The number of diffusion steps used when generating samples with a pre-trained model.
132
+ device (`str` or `torch.device`, *optional*):
133
+ The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
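+ timesteps (`List[float]`, *optional*):
+ Custom timesteps to use instead of the automatically generated schedule.
+ num_tokens (`int`, *optional*):
+ Number of image tokens in the latent sequence; when `dynamic_time_shift` is enabled, it sets the
+ shift factor `m = sqrt(num_tokens) / 40`.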
134
+ """
135
+
136
+ if timesteps is None:
137
+ self.num_inference_steps = num_inference_steps
138
+ timesteps = np.linspace(0, 1, num_inference_steps + 1, dtype=np.float32)[:-1]
139
+ if self.config.dynamic_time_shift and num_tokens is not None:
140
+ m = np.sqrt(num_tokens) / 40  # e.g. m = 1 for a 320 * 320 input (1600 latent tokens) and m = 3.2 for 1024 * 1024 (16384 tokens)
141
+ timesteps = timesteps / (m - m * timesteps + timesteps)
142
+
143
+ timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32, device=device)
144
+ _timesteps = torch.cat([timesteps, torch.ones(1, device=timesteps.device)])
145
+
146
+ self.timesteps = timesteps
147
+ self._timesteps = _timesteps
148
+ self._step_index = None
149
+ self._begin_index = None
150
+
151
+ def _init_step_index(self, timestep):
152
+ if self.begin_index is None:
153
+ if isinstance(timestep, torch.Tensor):
154
+ timestep = timestep.to(self.timesteps.device)
155
+ self._step_index = self.index_for_timestep(timestep)
156
+ else:
157
+ self._step_index = self._begin_index
158
+
159
+ def step(
160
+ self,
161
+ model_output: torch.FloatTensor,
162
+ timestep: Union[float, torch.FloatTensor],
163
+ sample: torch.FloatTensor,
164
+ generator: Optional[torch.Generator] = None,
165
+ return_dict: bool = True,
166
+ ) -> Union[FlowMatchEulerDiscreteSchedulerOutput, Tuple]:
167
+ """
168
+ Predict the sample at the next timestep by taking an Euler step along the learned flow. This function
169
+ propagates the sample using the learned model output (the predicted velocity).
170
+
171
+ Args:
172
+ model_output (`torch.FloatTensor`):
173
+ The direct output from learned diffusion model.
174
+ timestep (`float`):
175
+ The current discrete timestep in the diffusion chain.
176
+ sample (`torch.FloatTensor`):
177
+ A current instance of a sample created by the diffusion process.
183
+ generator (`torch.Generator`, *optional*):
184
+ A random number generator.
185
+ return_dict (`bool`):
186
+ Whether or not to return a [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or
187
+ tuple.
188
+
189
+ Returns:
190
+ [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or `tuple`:
191
+ If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] is
192
+ returned, otherwise a tuple is returned where the first element is the sample tensor.
193
+ """
194
+
195
+ if (
196
+ isinstance(timestep, int)
197
+ or isinstance(timestep, torch.IntTensor)
198
+ or isinstance(timestep, torch.LongTensor)
199
+ ):
200
+ raise ValueError(
201
+ (
202
+ "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
203
+ " `FlowMatchEulerDiscreteScheduler.step()` is not supported. Make sure to pass"
204
+ " one of the `scheduler.timesteps` as a timestep."
205
+ ),
206
+ )
207
+
208
+ if self.step_index is None:
209
+ self._init_step_index(timestep)
210
+ # Upcast to avoid precision issues when computing prev_sample
211
+ sample = sample.to(torch.float32)
212
+ t = self._timesteps[self.step_index]
213
+ t_next = self._timesteps[self.step_index + 1]
214
+
215
+ prev_sample = sample + (t_next - t) * model_output
216
+
217
+ # Cast sample back to model compatible dtype
218
+ prev_sample = prev_sample.to(model_output.dtype)
219
+
220
+ # upon completion increase step index by one
221
+ self._step_index += 1
222
+
223
+ if not return_dict:
224
+ return (prev_sample,)
225
+
226
+ return FlowMatchEulerDiscreteSchedulerOutput(prev_sample=prev_sample)
227
+
228
+ def __len__(self):
229
+ return self.config.num_train_timesteps
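As a rough usage sketch of the API above (not the actual OmniGen2 pipeline: the model call, tensor shapes, and the 8x VAE downsampling factor are illustrative assumptions), `set_timesteps` receives the number of latent tokens and the denoising loop then steps with the predicted velocity:

```python
# Illustrative only: wiring set_timesteps / step together with the dynamic time shift.
import torch
from scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler  # the file above

scheduler = FlowMatchEulerDiscreteScheduler(num_train_timesteps=1000, dynamic_time_shift=True)

# For a 1024 x 1024 image with an 8x VAE, the latent grid is 128 x 128 = 16384 tokens,
# so m = sqrt(16384) / 40 = 3.2 and the schedule is shifted accordingly.
scheduler.set_timesteps(num_inference_steps=30, num_tokens=128 * 128)

latents = torch.randn(1, 16, 128, 128)  # in_channels = 16, matching the transformer config
for t in scheduler.timesteps:
    velocity = torch.zeros_like(latents)  # placeholder for the transformer's velocity prediction
    latents = scheduler.step(velocity, t, latents).prev_sample
```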
time_caption_context_reinit/config.json ADDED
@@ -0,0 +1,27 @@
1
+ {
2
+ "_class_name": "OmniGen2Transformer2DModel",
3
+ "_diffusers_version": "0.36.0.dev0",
4
+ "axes_dim_rope": [
5
+ 40,
6
+ 40,
7
+ 40
8
+ ],
9
+ "axes_lens": [
10
+ 10000,
11
+ 10000,
12
+ 10000
13
+ ],
14
+ "ffn_dim_multiplier": null,
15
+ "hidden_size": 2520,
16
+ "in_channels": 16,
17
+ "multiple_of": 256,
18
+ "norm_eps": 1e-05,
19
+ "num_attention_heads": 21,
20
+ "num_kv_heads": 7,
21
+ "num_layers": 32,
22
+ "num_refiner_layers": 2,
23
+ "out_channels": null,
24
+ "patch_size": 2,
25
+ "text_feat_dim": 3584,
26
+ "timestep_scale": 1000
27
+ }
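A quick sanity check on the numbers in this config (illustrative, assuming the usual convention that `axes_dim_rope` partitions the per-head dimension across the three rotary axes):

```python
# Illustrative consistency check of the config values above.
hidden_size = 2520
num_attention_heads = 21
num_kv_heads = 7
head_dim = hidden_size // num_attention_heads  # 2520 / 21 = 120
axes_dim_rope = [40, 40, 40]

assert head_dim == sum(axes_dim_rope)            # the 3-axis RoPE spans the full per-head dim
assert num_attention_heads % num_kv_heads == 0   # 21 / 7 = 3 query heads per KV head (GQA)
```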
time_caption_context_reinit/diffusion_pytorch_model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb80cc5bd00e6cc7e8c18d74342872b6de1c7f33fc34f5ab147d1d4bf5bd10da
3
+ size 9969124696
time_caption_context_reinit/diffusion_pytorch_model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d22479336b8e493506d13dea6c4d0ebadc94b0734e1a465ffc8023113733a45
3
+ size 5915071312
time_caption_context_reinit/diffusion_pytorch_model.safetensors.index.json ADDED
@@ -0,0 +1,589 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 15884134624
4
+ },
5
+ "weight_map": {
6
+ "context_refiner.0.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
7
+ "context_refiner.0.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
8
+ "context_refiner.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
9
+ "context_refiner.0.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
10
+ "context_refiner.0.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
11
+ "context_refiner.0.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
12
+ "context_refiner.0.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
13
+ "context_refiner.0.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
14
+ "context_refiner.0.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
15
+ "context_refiner.0.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
16
+ "context_refiner.0.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
17
+ "context_refiner.0.norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
18
+ "context_refiner.0.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
19
+ "context_refiner.1.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
20
+ "context_refiner.1.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
21
+ "context_refiner.1.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
22
+ "context_refiner.1.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
23
+ "context_refiner.1.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
24
+ "context_refiner.1.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
25
+ "context_refiner.1.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
26
+ "context_refiner.1.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
27
+ "context_refiner.1.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
28
+ "context_refiner.1.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
29
+ "context_refiner.1.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
30
+ "context_refiner.1.norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
31
+ "context_refiner.1.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
32
+ "image_index_embedding": "diffusion_pytorch_model-00001-of-00002.safetensors",
33
+ "layers.0.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
34
+ "layers.0.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
35
+ "layers.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
36
+ "layers.0.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
37
+ "layers.0.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
38
+ "layers.0.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
39
+ "layers.0.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
40
+ "layers.0.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
41
+ "layers.0.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
42
+ "layers.0.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
43
+ "layers.0.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
44
+ "layers.0.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
45
+ "layers.0.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
46
+ "layers.0.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
47
+ "layers.0.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
48
+ "layers.1.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
49
+ "layers.1.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
50
+ "layers.1.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
51
+ "layers.1.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
52
+ "layers.1.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
53
+ "layers.1.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
54
+ "layers.1.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
55
+ "layers.1.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
56
+ "layers.1.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
57
+ "layers.1.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
58
+ "layers.1.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
59
+ "layers.1.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
60
+ "layers.1.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
61
+ "layers.1.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
62
+ "layers.1.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
63
+ "layers.10.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
64
+ "layers.10.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
65
+ "layers.10.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
66
+ "layers.10.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
67
+ "layers.10.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
68
+ "layers.10.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
69
+ "layers.10.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
70
+ "layers.10.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
71
+ "layers.10.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
72
+ "layers.10.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
73
+ "layers.10.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
74
+ "layers.10.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
75
+ "layers.10.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
76
+ "layers.10.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
77
+ "layers.10.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
78
+ "layers.11.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
79
+ "layers.11.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
80
+ "layers.11.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
81
+ "layers.11.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
82
+ "layers.11.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
83
+ "layers.11.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
84
+ "layers.11.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
85
+ "layers.11.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
86
+ "layers.11.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
87
+ "layers.11.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
88
+ "layers.11.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
89
+ "layers.11.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
90
+ "layers.11.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
91
+ "layers.11.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
92
+ "layers.11.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
93
+ "layers.12.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
94
+ "layers.12.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
95
+ "layers.12.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
96
+ "layers.12.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
97
+ "layers.12.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
98
+ "layers.12.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
99
+ "layers.12.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
100
+ "layers.12.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
101
+ "layers.12.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
102
+ "layers.12.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
103
+ "layers.12.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
104
+ "layers.12.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
105
+ "layers.12.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
106
+ "layers.12.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
107
+ "layers.12.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
108
+ "layers.13.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
109
+ "layers.13.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
110
+ "layers.13.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
111
+ "layers.13.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
112
+ "layers.13.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
113
+ "layers.13.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
114
+ "layers.13.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
115
+ "layers.13.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
116
+ "layers.13.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
117
+ "layers.13.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
118
+ "layers.13.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
119
+ "layers.13.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
120
+ "layers.13.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
121
+ "layers.13.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
122
+ "layers.13.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
123
+ "layers.14.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
124
+ "layers.14.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
125
+ "layers.14.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
126
+ "layers.14.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
127
+ "layers.14.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
128
+ "layers.14.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
129
+ "layers.14.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
130
+ "layers.14.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
131
+ "layers.14.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
132
+ "layers.14.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
133
+ "layers.14.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
134
+ "layers.14.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
135
+ "layers.14.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
136
+ "layers.14.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
137
+ "layers.14.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
138
+ "layers.15.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
139
+ "layers.15.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
140
+ "layers.15.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
141
+ "layers.15.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
142
+ "layers.15.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
143
+ "layers.15.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
144
+ "layers.15.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
145
+ "layers.15.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
146
+ "layers.15.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
147
+ "layers.15.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
148
+ "layers.15.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
149
+ "layers.15.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
150
+ "layers.15.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
151
+ "layers.15.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
152
+ "layers.15.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
153
+ "layers.16.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
154
+ "layers.16.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
155
+ "layers.16.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
156
+ "layers.16.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
157
+ "layers.16.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
158
+ "layers.16.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
159
+ "layers.16.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
160
+ "layers.16.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
161
+ "layers.16.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
162
+ "layers.16.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
163
+ "layers.16.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
164
+ "layers.16.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
165
+ "layers.16.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
166
+ "layers.16.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
167
+ "layers.16.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
168
+ "layers.17.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
169
+ "layers.17.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
170
+ "layers.17.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
171
+ "layers.17.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
172
+ "layers.17.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
173
+ "layers.17.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
174
+ "layers.17.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
175
+ "layers.17.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
176
+ "layers.17.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
177
+ "layers.17.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
178
+ "layers.17.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
179
+ "layers.17.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
180
+ "layers.17.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
181
+ "layers.17.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
182
+ "layers.17.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
183
+ "layers.18.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
184
+ "layers.18.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
185
+ "layers.18.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
186
+ "layers.18.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
187
+ "layers.18.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
188
+ "layers.18.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
189
+ "layers.18.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
190
+ "layers.18.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
191
+ "layers.18.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
192
+ "layers.18.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
193
+ "layers.18.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
194
+ "layers.18.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
195
+ "layers.18.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
196
+ "layers.18.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
197
+ "layers.18.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
198
+ "layers.19.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
199
+ "layers.19.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
200
+ "layers.19.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
201
+ "layers.19.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
202
+ "layers.19.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
203
+ "layers.19.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
204
+ "layers.19.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
205
+ "layers.19.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
206
+ "layers.19.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
207
+ "layers.19.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
208
+ "layers.19.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
209
+ "layers.19.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
210
+ "layers.19.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
211
+ "layers.19.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
212
+ "layers.19.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
213
+ "layers.2.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
214
+ "layers.2.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
215
+ "layers.2.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
216
+ "layers.2.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
217
+ "layers.2.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
218
+ "layers.2.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
219
+ "layers.2.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
220
+ "layers.2.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
221
+ "layers.2.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
222
+ "layers.2.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
223
+ "layers.2.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
224
+ "layers.2.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
225
+ "layers.2.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
226
+ "layers.2.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
227
+ "layers.2.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
228
+ "layers.20.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
229
+ "layers.20.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
230
+ "layers.20.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
231
+ "layers.20.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
232
+ "layers.20.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
233
+ "layers.20.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
234
+ "layers.20.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
235
+ "layers.20.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
236
+ "layers.20.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
237
+ "layers.20.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
238
+ "layers.20.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
239
+ "layers.20.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
240
+ "layers.20.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
241
+ "layers.20.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
242
+ "layers.20.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
243
+ "layers.21.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
244
+ "layers.21.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
245
+ "layers.21.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
246
+ "layers.21.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
247
+ "layers.21.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
248
+ "layers.21.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
249
+ "layers.21.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
250
+ "layers.21.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
251
+ "layers.21.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
252
+ "layers.21.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
253
+ "layers.21.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
254
+ "layers.21.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
255
+ "layers.21.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
256
+ "layers.21.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
257
+ "layers.21.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
258
+ "layers.22.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
259
+ "layers.22.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
260
+ "layers.22.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
261
+ "layers.22.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
262
+ "layers.22.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
263
+ "layers.22.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
264
+ "layers.22.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
265
+ "layers.22.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
266
+ "layers.22.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
267
+ "layers.22.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
268
+ "layers.22.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
269
+ "layers.22.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
270
+ "layers.22.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
271
+ "layers.22.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
272
+ "layers.22.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
273
+ "layers.23.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
274
+ "layers.23.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
275
+ "layers.23.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
276
+ "layers.23.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
277
+ "layers.23.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
278
+ "layers.23.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
279
+ "layers.23.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
280
+ "layers.23.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
281
+ "layers.23.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
282
+ "layers.23.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
283
+ "layers.23.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
284
+ "layers.23.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
285
+ "layers.23.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
286
+ "layers.23.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
287
+ "layers.23.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
288
+ "layers.24.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
289
+ "layers.24.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
290
+ "layers.24.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
291
+ "layers.24.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
292
+ "layers.24.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
293
+ "layers.24.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
294
+ "layers.24.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
295
+ "layers.24.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
296
+ "layers.24.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
297
+ "layers.24.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
298
+ "layers.24.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
299
+ "layers.24.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
300
+ "layers.24.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
301
+ "layers.24.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
302
+ "layers.24.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
303
+ "layers.25.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
304
+ "layers.25.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
305
+ "layers.25.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
306
+ "layers.25.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
307
+ "layers.25.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
308
+ "layers.25.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
309
+ "layers.25.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
310
+ "layers.25.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
311
+ "layers.25.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
312
+ "layers.25.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
313
+ "layers.25.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
314
+ "layers.25.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
315
+ "layers.25.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
316
+ "layers.25.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
317
+ "layers.25.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
318
+ "layers.26.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
319
+ "layers.26.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
320
+ "layers.26.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
321
+ "layers.26.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
322
+ "layers.26.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
323
+ "layers.26.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
324
+ "layers.26.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
325
+ "layers.26.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
326
+ "layers.26.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
327
+ "layers.26.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
328
+ "layers.26.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
329
+ "layers.26.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
330
+ "layers.26.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
331
+ "layers.26.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
332
+ "layers.26.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
333
+ "layers.27.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
334
+ "layers.27.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
335
+ "layers.27.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
336
+ "layers.27.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
337
+ "layers.27.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
338
+ "layers.27.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
339
+ "layers.27.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
340
+ "layers.27.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
341
+ "layers.27.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
342
+ "layers.27.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
343
+ "layers.27.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
344
+ "layers.27.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
345
+ "layers.27.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
346
+ "layers.27.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
347
+ "layers.27.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
348
+ "layers.28.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
349
+ "layers.28.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
350
+ "layers.28.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
351
+ "layers.28.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
352
+ "layers.28.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
353
+ "layers.28.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
354
+ "layers.28.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
355
+ "layers.28.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
356
+ "layers.28.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
357
+ "layers.28.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
358
+ "layers.28.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
359
+ "layers.28.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
360
+ "layers.28.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
361
+ "layers.28.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
362
+ "layers.28.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
363
+ "layers.29.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
364
+ "layers.29.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
365
+ "layers.29.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
366
+ "layers.29.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
367
+ "layers.29.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
368
+ "layers.29.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
369
+ "layers.29.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
370
+ "layers.29.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
371
+ "layers.29.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
372
+ "layers.29.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
373
+ "layers.29.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
374
+ "layers.29.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
375
+ "layers.29.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
376
+ "layers.29.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
377
+ "layers.29.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
378
+ "layers.3.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
379
+ "layers.3.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
380
+ "layers.3.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
381
+ "layers.3.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
382
+ "layers.3.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
383
+ "layers.3.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
384
+ "layers.3.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
385
+ "layers.3.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
386
+ "layers.3.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
387
+ "layers.3.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
388
+ "layers.3.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
389
+ "layers.3.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
390
+ "layers.3.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
391
+ "layers.3.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
392
+ "layers.3.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
393
+ "layers.30.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
394
+ "layers.30.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
395
+ "layers.30.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
396
+ "layers.30.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
397
+ "layers.30.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
398
+ "layers.30.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
399
+ "layers.30.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
400
+ "layers.30.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
401
+ "layers.30.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
402
+ "layers.30.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
403
+ "layers.30.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
404
+ "layers.30.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
405
+ "layers.30.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
406
+ "layers.30.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
407
+ "layers.30.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
408
+ "layers.31.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
409
+ "layers.31.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
410
+ "layers.31.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
411
+ "layers.31.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
412
+ "layers.31.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
413
+ "layers.31.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
414
+ "layers.31.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
415
+ "layers.31.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
416
+ "layers.31.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
417
+ "layers.31.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
418
+ "layers.31.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
419
+ "layers.31.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
420
+ "layers.31.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
421
+ "layers.31.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
422
+ "layers.31.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
423
+ "layers.4.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
424
+ "layers.4.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
425
+ "layers.4.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
426
+ "layers.4.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
427
+ "layers.4.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
428
+ "layers.4.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
429
+ "layers.4.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
430
+ "layers.4.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
431
+ "layers.4.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
432
+ "layers.4.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
433
+ "layers.4.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
434
+ "layers.4.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
435
+ "layers.4.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
436
+ "layers.4.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
437
+ "layers.4.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
438
+ "layers.5.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
439
+ "layers.5.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
440
+ "layers.5.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
441
+ "layers.5.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
442
+ "layers.5.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
443
+ "layers.5.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
444
+ "layers.5.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
445
+ "layers.5.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
446
+ "layers.5.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
447
+ "layers.5.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
448
+ "layers.5.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
449
+ "layers.5.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
450
+ "layers.5.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
451
+ "layers.5.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
452
+ "layers.5.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
453
+ "layers.6.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
454
+ "layers.6.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
455
+ "layers.6.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
456
+ "layers.6.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
457
+ "layers.6.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
458
+ "layers.6.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
459
+ "layers.6.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
460
+ "layers.6.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
461
+ "layers.6.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
462
+ "layers.6.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
463
+ "layers.6.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
464
+ "layers.6.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
465
+ "layers.6.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
466
+ "layers.6.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
467
+ "layers.6.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
468
+ "layers.7.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
469
+ "layers.7.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
470
+ "layers.7.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
471
+ "layers.7.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
472
+ "layers.7.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
473
+ "layers.7.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
474
+ "layers.7.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
475
+ "layers.7.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
476
+ "layers.7.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
477
+ "layers.7.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
478
+ "layers.7.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
479
+ "layers.7.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
480
+ "layers.7.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
481
+ "layers.7.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
482
+ "layers.7.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
483
+ "layers.8.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
484
+ "layers.8.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
485
+ "layers.8.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
486
+ "layers.8.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
487
+ "layers.8.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
488
+ "layers.8.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
489
+ "layers.8.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
490
+ "layers.8.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
491
+ "layers.8.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
492
+ "layers.8.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
493
+ "layers.8.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
494
+ "layers.8.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
495
+ "layers.8.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
496
+ "layers.8.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
497
+ "layers.8.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
498
+ "layers.9.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
499
+ "layers.9.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
500
+ "layers.9.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
501
+ "layers.9.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
502
+ "layers.9.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
503
+ "layers.9.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
504
+ "layers.9.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
505
+ "layers.9.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
506
+ "layers.9.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
507
+ "layers.9.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
508
+ "layers.9.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
509
+ "layers.9.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
510
+ "layers.9.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
511
+ "layers.9.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
512
+ "layers.9.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
513
+ "noise_refiner.0.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
514
+ "noise_refiner.0.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
515
+ "noise_refiner.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
516
+ "noise_refiner.0.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
517
+ "noise_refiner.0.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
518
+ "noise_refiner.0.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
519
+ "noise_refiner.0.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
520
+ "noise_refiner.0.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
521
+ "noise_refiner.0.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
522
+ "noise_refiner.0.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
523
+ "noise_refiner.0.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
524
+ "noise_refiner.0.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
525
+ "noise_refiner.0.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
526
+ "noise_refiner.0.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
527
+ "noise_refiner.0.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
528
+ "noise_refiner.1.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
529
+ "noise_refiner.1.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
530
+ "noise_refiner.1.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
531
+ "noise_refiner.1.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
532
+ "noise_refiner.1.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
533
+ "noise_refiner.1.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
534
+ "noise_refiner.1.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
535
+ "noise_refiner.1.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
536
+ "noise_refiner.1.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
537
+ "noise_refiner.1.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
538
+ "noise_refiner.1.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
539
+ "noise_refiner.1.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
540
+ "noise_refiner.1.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
541
+ "noise_refiner.1.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
542
+ "noise_refiner.1.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
543
+ "norm_out.linear_1.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
544
+ "norm_out.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
545
+ "norm_out.linear_2.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
546
+ "norm_out.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
547
+ "ref_image_patch_embedder.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
548
+ "ref_image_patch_embedder.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
549
+ "ref_image_refiner.0.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
550
+ "ref_image_refiner.0.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
551
+ "ref_image_refiner.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
552
+ "ref_image_refiner.0.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
553
+ "ref_image_refiner.0.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
554
+ "ref_image_refiner.0.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
555
+ "ref_image_refiner.0.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
556
+ "ref_image_refiner.0.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
557
+ "ref_image_refiner.0.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
558
+ "ref_image_refiner.0.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
559
+ "ref_image_refiner.0.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
560
+ "ref_image_refiner.0.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
561
+ "ref_image_refiner.0.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
562
+ "ref_image_refiner.0.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
563
+ "ref_image_refiner.0.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
564
+ "ref_image_refiner.1.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
565
+ "ref_image_refiner.1.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
566
+ "ref_image_refiner.1.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
567
+ "ref_image_refiner.1.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
568
+ "ref_image_refiner.1.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
569
+ "ref_image_refiner.1.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
570
+ "ref_image_refiner.1.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
571
+ "ref_image_refiner.1.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
572
+ "ref_image_refiner.1.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
573
+ "ref_image_refiner.1.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
574
+ "ref_image_refiner.1.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
575
+ "ref_image_refiner.1.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
576
+ "ref_image_refiner.1.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
577
+ "ref_image_refiner.1.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
578
+ "ref_image_refiner.1.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
579
+ "time_caption_embed.caption_embedder.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
580
+ "time_caption_embed.caption_embedder.1.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
581
+ "time_caption_embed.caption_embedder.1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
582
+ "time_caption_embed.timestep_embedder.linear_1.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
583
+ "time_caption_embed.timestep_embedder.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
584
+ "time_caption_embed.timestep_embedder.linear_2.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
585
+ "time_caption_embed.timestep_embedder.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
586
+ "x_embedder.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
587
+ "x_embedder.weight": "diffusion_pytorch_model-00001-of-00002.safetensors"
588
+ }
589
+ }
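
The block above is the tail of a standard sharded-checkpoint index: its `weight_map` assigns every parameter name to the shard file that stores it. A minimal sketch of how such an index can be used to fetch a single tensor without loading the whole checkpoint is shown below; the subfolder path (assumed here to be `time_caption_context_reinit`, per the file list) and the chosen key are illustrative.

```python
import json
from safetensors import safe_open

# Illustrative local path to the downloaded subfolder; adjust to your setup.
subfolder = "time_caption_context_reinit"

# The *.safetensors.index.json maps every parameter name to its shard.
with open(f"{subfolder}/diffusion_pytorch_model.safetensors.index.json") as f:
    index = json.load(f)

key = "layers.5.attn.to_q.weight"        # any name listed in weight_map
shard = index["weight_map"][key]         # e.g. "diffusion_pytorch_model-00001-of-00002.safetensors"

# Load only that tensor from the shard instead of the whole checkpoint.
with safe_open(f"{subfolder}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(key)
print(key, tuple(tensor.shape))
```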
time_caption_reinit/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "_class_name": "OmniGen2Transformer2DModel",
+ "_diffusers_version": "0.36.0.dev0",
+ "axes_dim_rope": [
+ 40,
+ 40,
+ 40
+ ],
+ "axes_lens": [
+ 10000,
+ 10000,
+ 10000
+ ],
+ "ffn_dim_multiplier": null,
+ "hidden_size": 2520,
+ "in_channels": 16,
+ "multiple_of": 256,
+ "norm_eps": 1e-05,
+ "num_attention_heads": 21,
+ "num_kv_heads": 7,
+ "num_layers": 32,
+ "num_refiner_layers": 2,
+ "out_channels": null,
+ "patch_size": 2,
+ "text_feat_dim": 3584,
+ "timestep_scale": 1000
+ }
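
A quick consistency check on the `time_caption_reinit` config above: the per-head dimension is `hidden_size / num_attention_heads` = 2520 / 21 = 120, which equals the sum of the three RoPE axis dimensions (40 + 40 + 40). The sketch below verifies this by reading the config; the file path is illustrative.

```python
import json

# Illustrative path; adjust to where the repo was downloaded.
with open("time_caption_reinit/config.json") as f:
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]   # 2520 // 21 = 120
rope_dim = sum(cfg["axes_dim_rope"])                          # 40 + 40 + 40 = 120

# The 3-axis RoPE dimensions should fill exactly one attention head.
assert head_dim == rope_dim, (head_dim, rope_dim)
print(f"head_dim={head_dim}, rope axes={cfg['axes_dim_rope']}")
```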
time_caption_reinit/diffusion_pytorch_model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99b83b1c092eb2ce9f8e32a37f4243b3a9a8c347f77cdd0b38f28d995570f62a
+ size 9969124696
time_caption_reinit/diffusion_pytorch_model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d22479336b8e493506d13dea6c4d0ebadc94b0734e1a465ffc8023113733a45
+ size 5915071312
time_caption_reinit/diffusion_pytorch_model.safetensors.index.json ADDED
@@ -0,0 +1,589 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 15884134624
4
+ },
5
+ "weight_map": {
6
+ "context_refiner.0.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
7
+ "context_refiner.0.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
8
+ "context_refiner.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
9
+ "context_refiner.0.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
10
+ "context_refiner.0.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
11
+ "context_refiner.0.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
12
+ "context_refiner.0.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
13
+ "context_refiner.0.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
14
+ "context_refiner.0.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
15
+ "context_refiner.0.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
16
+ "context_refiner.0.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
17
+ "context_refiner.0.norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
18
+ "context_refiner.0.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
19
+ "context_refiner.1.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
20
+ "context_refiner.1.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
21
+ "context_refiner.1.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
22
+ "context_refiner.1.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
23
+ "context_refiner.1.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
24
+ "context_refiner.1.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
25
+ "context_refiner.1.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
26
+ "context_refiner.1.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
27
+ "context_refiner.1.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
28
+ "context_refiner.1.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
29
+ "context_refiner.1.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
30
+ "context_refiner.1.norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
31
+ "context_refiner.1.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
32
+ "image_index_embedding": "diffusion_pytorch_model-00001-of-00002.safetensors",
33
+ "layers.0.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
34
+ "layers.0.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
35
+ "layers.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
36
+ "layers.0.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
37
+ "layers.0.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
38
+ "layers.0.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
39
+ "layers.0.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
40
+ "layers.0.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
41
+ "layers.0.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
42
+ "layers.0.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
43
+ "layers.0.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
44
+ "layers.0.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
45
+ "layers.0.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
46
+ "layers.0.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
47
+ "layers.0.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
48
+ "layers.1.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
49
+ "layers.1.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
50
+ "layers.1.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
51
+ "layers.1.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
52
+ "layers.1.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
53
+ "layers.1.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
54
+ "layers.1.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
55
+ "layers.1.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
56
+ "layers.1.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
57
+ "layers.1.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
58
+ "layers.1.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
59
+ "layers.1.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
60
+ "layers.1.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
61
+ "layers.1.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
62
+ "layers.1.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
63
+ "layers.10.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
64
+ "layers.10.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
65
+ "layers.10.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
66
+ "layers.10.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
67
+ "layers.10.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
68
+ "layers.10.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
69
+ "layers.10.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
70
+ "layers.10.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
71
+ "layers.10.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
72
+ "layers.10.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
73
+ "layers.10.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
74
+ "layers.10.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
75
+ "layers.10.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
76
+ "layers.10.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
77
+ "layers.10.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
78
+ "layers.11.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
79
+ "layers.11.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
80
+ "layers.11.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
81
+ "layers.11.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
82
+ "layers.11.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
83
+ "layers.11.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
84
+ "layers.11.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
85
+ "layers.11.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
86
+ "layers.11.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
87
+ "layers.11.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
88
+ "layers.11.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
89
+ "layers.11.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
90
+ "layers.11.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
91
+ "layers.11.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
92
+ "layers.11.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
93
+ "layers.12.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
94
+ "layers.12.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
95
+ "layers.12.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
96
+ "layers.12.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
97
+ "layers.12.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
98
+ "layers.12.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
99
+ "layers.12.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
100
+ "layers.12.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
101
+ "layers.12.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
102
+ "layers.12.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
103
+ "layers.12.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
104
+ "layers.12.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
105
+ "layers.12.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
106
+ "layers.12.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
107
+ "layers.12.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
108
+ "layers.13.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
109
+ "layers.13.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
110
+ "layers.13.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
111
+ "layers.13.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
112
+ "layers.13.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
113
+ "layers.13.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
114
+ "layers.13.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
115
+ "layers.13.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
116
+ "layers.13.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
117
+ "layers.13.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
118
+ "layers.13.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
119
+ "layers.13.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
120
+ "layers.13.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
121
+ "layers.13.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
122
+ "layers.13.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
123
+ "layers.14.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
124
+ "layers.14.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
125
+ "layers.14.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
126
+ "layers.14.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
127
+ "layers.14.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
128
+ "layers.14.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
129
+ "layers.14.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
130
+ "layers.14.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
131
+ "layers.14.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
132
+ "layers.14.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
133
+ "layers.14.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
134
+ "layers.14.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
135
+ "layers.14.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
136
+ "layers.14.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
137
+ "layers.14.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
138
+ "layers.15.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
139
+ "layers.15.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
140
+ "layers.15.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
141
+ "layers.15.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
142
+ "layers.15.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
143
+ "layers.15.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
144
+ "layers.15.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
145
+ "layers.15.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
146
+ "layers.15.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
147
+ "layers.15.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
148
+ "layers.15.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
149
+ "layers.15.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
150
+ "layers.15.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
151
+ "layers.15.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
152
+ "layers.15.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
153
+ "layers.16.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
154
+ "layers.16.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
155
+ "layers.16.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
156
+ "layers.16.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
157
+ "layers.16.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
158
+ "layers.16.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
159
+ "layers.16.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
160
+ "layers.16.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
161
+ "layers.16.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
162
+ "layers.16.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
163
+ "layers.16.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
164
+ "layers.16.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
165
+ "layers.16.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
166
+ "layers.16.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
167
+ "layers.16.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
168
+ "layers.17.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
169
+ "layers.17.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
170
+ "layers.17.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
171
+ "layers.17.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
172
+ "layers.17.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
173
+ "layers.17.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
174
+ "layers.17.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
175
+ "layers.17.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
176
+ "layers.17.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
177
+ "layers.17.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
178
+ "layers.17.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
179
+ "layers.17.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
180
+ "layers.17.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
181
+ "layers.17.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
182
+ "layers.17.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
183
+ "layers.18.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
184
+ "layers.18.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
185
+ "layers.18.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
186
+ "layers.18.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
187
+ "layers.18.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
188
+ "layers.18.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
189
+ "layers.18.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
190
+ "layers.18.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
191
+ "layers.18.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
192
+ "layers.18.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
193
+ "layers.18.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
194
+ "layers.18.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
195
+ "layers.18.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
196
+ "layers.18.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
197
+ "layers.18.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
198
+ "layers.19.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
199
+ "layers.19.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
200
+ "layers.19.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
201
+ "layers.19.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
202
+ "layers.19.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
203
+ "layers.19.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
204
+ "layers.19.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
205
+ "layers.19.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
206
+ "layers.19.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
207
+ "layers.19.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
208
+ "layers.19.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
209
+ "layers.19.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
210
+ "layers.19.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
211
+ "layers.19.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
212
+ "layers.19.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
213
+ "layers.2.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
214
+ "layers.2.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
215
+ "layers.2.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
216
+ "layers.2.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
217
+ "layers.2.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
218
+ "layers.2.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
219
+ "layers.2.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
220
+ "layers.2.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
221
+ "layers.2.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
222
+ "layers.2.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
223
+ "layers.2.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
224
+ "layers.2.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
225
+ "layers.2.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
226
+ "layers.2.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
227
+ "layers.2.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
228
+ "layers.20.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
229
+ "layers.20.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
230
+ "layers.20.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
231
+ "layers.20.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
232
+ "layers.20.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
233
+ "layers.20.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
234
+ "layers.20.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
235
+ "layers.20.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
236
+ "layers.20.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
237
+ "layers.20.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
238
+ "layers.20.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
239
+ "layers.20.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
240
+ "layers.20.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
241
+ "layers.20.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
242
+ "layers.20.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
243
+ "layers.21.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
244
+ "layers.21.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
245
+ "layers.21.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
246
+ "layers.21.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
247
+ "layers.21.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
248
+ "layers.21.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
249
+ "layers.21.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
250
+ "layers.21.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
251
+ "layers.21.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
252
+ "layers.21.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
253
+ "layers.21.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
254
+ "layers.21.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
255
+ "layers.21.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
256
+ "layers.21.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
257
+ "layers.21.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
258
+ "layers.22.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
259
+ "layers.22.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
260
+ "layers.22.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
261
+ "layers.22.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
262
+ "layers.22.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
263
+ "layers.22.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
264
+ "layers.22.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
265
+ "layers.22.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
266
+ "layers.22.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
267
+ "layers.22.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
268
+ "layers.22.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
269
+ "layers.22.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
270
+ "layers.22.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
271
+ "layers.22.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
272
+ "layers.22.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
273
+ "layers.23.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
274
+ "layers.23.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
275
+ "layers.23.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
276
+ "layers.23.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
277
+ "layers.23.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
278
+ "layers.23.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
279
+ "layers.23.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
280
+ "layers.23.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
281
+ "layers.23.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
282
+ "layers.23.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
283
+ "layers.23.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
284
+ "layers.23.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
285
+ "layers.23.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
286
+ "layers.23.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
287
+ "layers.23.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
288
+ "layers.24.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
289
+ "layers.24.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
290
+ "layers.24.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
291
+ "layers.24.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
292
+ "layers.24.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
293
+ "layers.24.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
294
+ "layers.24.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
295
+ "layers.24.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
296
+ "layers.24.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
297
+ "layers.24.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
298
+ "layers.24.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
299
+ "layers.24.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
300
+ "layers.24.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
301
+ "layers.24.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
302
+ "layers.24.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
303
+ "layers.25.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
304
+ "layers.25.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
305
+ "layers.25.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
306
+ "layers.25.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
307
+ "layers.25.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
308
+ "layers.25.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
309
+ "layers.25.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
310
+ "layers.25.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
311
+ "layers.25.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
312
+ "layers.25.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
313
+ "layers.25.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
314
+ "layers.25.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
315
+ "layers.25.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
316
+ "layers.25.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
317
+ "layers.25.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
318
+ "layers.26.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
319
+ "layers.26.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
320
+ "layers.26.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
321
+ "layers.26.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
322
+ "layers.26.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
323
+ "layers.26.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
324
+ "layers.26.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
325
+ "layers.26.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
326
+ "layers.26.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
327
+ "layers.26.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
328
+ "layers.26.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
329
+ "layers.26.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
330
+ "layers.26.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
331
+ "layers.26.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
332
+ "layers.26.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
333
+ "layers.27.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
334
+ "layers.27.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
335
+ "layers.27.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
336
+ "layers.27.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
337
+ "layers.27.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
338
+ "layers.27.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
339
+ "layers.27.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
340
+ "layers.27.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
341
+ "layers.27.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
342
+ "layers.27.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
343
+ "layers.27.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
344
+ "layers.27.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
345
+ "layers.27.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
346
+ "layers.27.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
347
+ "layers.27.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
348
+ "layers.28.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
349
+ "layers.28.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
350
+ "layers.28.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
351
+ "layers.28.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
352
+ "layers.28.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
353
+ "layers.28.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
354
+ "layers.28.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
355
+ "layers.28.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
356
+ "layers.28.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
357
+ "layers.28.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
358
+ "layers.28.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
359
+ "layers.28.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
360
+ "layers.28.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
361
+ "layers.28.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
362
+ "layers.28.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
363
+ "layers.29.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
364
+ "layers.29.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
365
+ "layers.29.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
366
+ "layers.29.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
367
+ "layers.29.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
368
+ "layers.29.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
369
+ "layers.29.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
370
+ "layers.29.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
371
+ "layers.29.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
372
+ "layers.29.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
373
+ "layers.29.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
374
+ "layers.29.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
375
+ "layers.29.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
376
+ "layers.29.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
377
+ "layers.29.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
378
+ "layers.3.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
379
+ "layers.3.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
380
+ "layers.3.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
381
+ "layers.3.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
382
+ "layers.3.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
383
+ "layers.3.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
384
+ "layers.3.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
385
+ "layers.3.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
386
+ "layers.3.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
387
+ "layers.3.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
388
+ "layers.3.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
389
+ "layers.3.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
390
+ "layers.3.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
391
+ "layers.3.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
392
+ "layers.3.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
393
+ "layers.30.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
394
+ "layers.30.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
395
+ "layers.30.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
396
+ "layers.30.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
397
+ "layers.30.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
398
+ "layers.30.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
399
+ "layers.30.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
400
+ "layers.30.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
401
+ "layers.30.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
402
+ "layers.30.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
403
+ "layers.30.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
404
+ "layers.30.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
405
+ "layers.30.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
406
+ "layers.30.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
407
+ "layers.30.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
408
+ "layers.31.attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
409
+ "layers.31.attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
410
+ "layers.31.attn.to_k.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
411
+ "layers.31.attn.to_out.0.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
412
+ "layers.31.attn.to_q.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
413
+ "layers.31.attn.to_v.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
414
+ "layers.31.feed_forward.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
415
+ "layers.31.feed_forward.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
416
+ "layers.31.feed_forward.linear_3.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
417
+ "layers.31.ffn_norm1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
418
+ "layers.31.ffn_norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
419
+ "layers.31.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
420
+ "layers.31.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
421
+ "layers.31.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
422
+ "layers.31.norm2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
423
+ "layers.4.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
424
+ "layers.4.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
425
+ "layers.4.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
426
+ "layers.4.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
427
+ "layers.4.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
428
+ "layers.4.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
429
+ "layers.4.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
430
+ "layers.4.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
431
+ "layers.4.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
432
+ "layers.4.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
433
+ "layers.4.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
434
+ "layers.4.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
435
+ "layers.4.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
436
+ "layers.4.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
437
+ "layers.4.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
438
+ "layers.5.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
439
+ "layers.5.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
440
+ "layers.5.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
441
+ "layers.5.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
442
+ "layers.5.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
443
+ "layers.5.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
444
+ "layers.5.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
445
+ "layers.5.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
446
+ "layers.5.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
447
+ "layers.5.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
448
+ "layers.5.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
449
+ "layers.5.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
450
+ "layers.5.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
451
+ "layers.5.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
452
+ "layers.5.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
453
+ "layers.6.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
454
+ "layers.6.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
455
+ "layers.6.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
456
+ "layers.6.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
457
+ "layers.6.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
458
+ "layers.6.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
459
+ "layers.6.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
460
+ "layers.6.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
461
+ "layers.6.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
462
+ "layers.6.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
463
+ "layers.6.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
464
+ "layers.6.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
465
+ "layers.6.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
466
+ "layers.6.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
467
+ "layers.6.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
468
+ "layers.7.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
469
+ "layers.7.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
470
+ "layers.7.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
471
+ "layers.7.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
472
+ "layers.7.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
473
+ "layers.7.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
474
+ "layers.7.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
475
+ "layers.7.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
476
+ "layers.7.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
477
+ "layers.7.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
478
+ "layers.7.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
479
+ "layers.7.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
480
+ "layers.7.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
481
+ "layers.7.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
482
+ "layers.7.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
483
+ "layers.8.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
484
+ "layers.8.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
485
+ "layers.8.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
486
+ "layers.8.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
487
+ "layers.8.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
488
+ "layers.8.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
489
+ "layers.8.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
490
+ "layers.8.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
491
+ "layers.8.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
492
+ "layers.8.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
493
+ "layers.8.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
494
+ "layers.8.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
495
+ "layers.8.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
496
+ "layers.8.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
497
+ "layers.8.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
498
+ "layers.9.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
499
+ "layers.9.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
500
+ "layers.9.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
501
+ "layers.9.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
502
+ "layers.9.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
503
+ "layers.9.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
504
+ "layers.9.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
505
+ "layers.9.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
506
+ "layers.9.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
507
+ "layers.9.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
508
+ "layers.9.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
509
+ "layers.9.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
510
+ "layers.9.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
511
+ "layers.9.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
512
+ "layers.9.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
513
+ "noise_refiner.0.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
514
+ "noise_refiner.0.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
515
+ "noise_refiner.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
516
+ "noise_refiner.0.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
517
+ "noise_refiner.0.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
518
+ "noise_refiner.0.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
519
+ "noise_refiner.0.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
520
+ "noise_refiner.0.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
521
+ "noise_refiner.0.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
522
+ "noise_refiner.0.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
523
+ "noise_refiner.0.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
524
+ "noise_refiner.0.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
525
+ "noise_refiner.0.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
526
+ "noise_refiner.0.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
527
+ "noise_refiner.0.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
528
+ "noise_refiner.1.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
529
+ "noise_refiner.1.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
530
+ "noise_refiner.1.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
531
+ "noise_refiner.1.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
532
+ "noise_refiner.1.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
533
+ "noise_refiner.1.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
534
+ "noise_refiner.1.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
535
+ "noise_refiner.1.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
536
+ "noise_refiner.1.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
537
+ "noise_refiner.1.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
538
+ "noise_refiner.1.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
539
+ "noise_refiner.1.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
540
+ "noise_refiner.1.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
541
+ "noise_refiner.1.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
542
+ "noise_refiner.1.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
543
+ "norm_out.linear_1.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
544
+ "norm_out.linear_1.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
545
+ "norm_out.linear_2.bias": "diffusion_pytorch_model-00002-of-00002.safetensors",
546
+ "norm_out.linear_2.weight": "diffusion_pytorch_model-00002-of-00002.safetensors",
547
+ "ref_image_patch_embedder.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
548
+ "ref_image_patch_embedder.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
549
+ "ref_image_refiner.0.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
550
+ "ref_image_refiner.0.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
551
+ "ref_image_refiner.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
552
+ "ref_image_refiner.0.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
553
+ "ref_image_refiner.0.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
554
+ "ref_image_refiner.0.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
555
+ "ref_image_refiner.0.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
556
+ "ref_image_refiner.0.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
557
+ "ref_image_refiner.0.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
558
+ "ref_image_refiner.0.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
559
+ "ref_image_refiner.0.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
560
+ "ref_image_refiner.0.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
561
+ "ref_image_refiner.0.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
562
+ "ref_image_refiner.0.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
563
+ "ref_image_refiner.0.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
564
+ "ref_image_refiner.1.attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
565
+ "ref_image_refiner.1.attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
566
+ "ref_image_refiner.1.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
567
+ "ref_image_refiner.1.attn.to_out.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
568
+ "ref_image_refiner.1.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
569
+ "ref_image_refiner.1.attn.to_v.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
570
+ "ref_image_refiner.1.feed_forward.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
571
+ "ref_image_refiner.1.feed_forward.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
572
+ "ref_image_refiner.1.feed_forward.linear_3.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
573
+ "ref_image_refiner.1.ffn_norm1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
574
+ "ref_image_refiner.1.ffn_norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
575
+ "ref_image_refiner.1.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
576
+ "ref_image_refiner.1.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
577
+ "ref_image_refiner.1.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
578
+ "ref_image_refiner.1.norm2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
579
+ "time_caption_embed.caption_embedder.0.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
580
+ "time_caption_embed.caption_embedder.1.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
581
+ "time_caption_embed.caption_embedder.1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
582
+ "time_caption_embed.timestep_embedder.linear_1.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
583
+ "time_caption_embed.timestep_embedder.linear_1.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
584
+ "time_caption_embed.timestep_embedder.linear_2.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
585
+ "time_caption_embed.timestep_embedder.linear_2.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
586
+ "x_embedder.bias": "diffusion_pytorch_model-00001-of-00002.safetensors",
587
+ "x_embedder.weight": "diffusion_pytorch_model-00001-of-00002.safetensors"
588
+ }
589
+ }
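The `weight_map` above assigns every parameter name to one of the two safetensors shards, so a loader only needs to open the shard that actually contains a given tensor. A minimal sketch of that lookup, assuming a local copy of this subfolder (the directory path and tensor name below are illustrative, not part of the upload):

```python
# Resolve which shard holds a tensor via weight_map in *.safetensors.index.json,
# then load only that tensor with the safetensors library.
import json
from safetensors import safe_open

shard_dir = "time_caption_reinit"  # hypothetical local checkout of this subfolder
with open(f"{shard_dir}/diffusion_pytorch_model.safetensors.index.json") as f:
    index = json.load(f)

name = "layers.5.attn.to_q.weight"
shard_file = index["weight_map"][name]  # e.g. "diffusion_pytorch_model-00001-of-00002.safetensors"

with safe_open(f"{shard_dir}/{shard_file}", framework="pt") as shard:
    tensor = shard.get_tensor(name)
print(name, tuple(tensor.shape))
```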
transformer/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_class_name": "OmniGen2Transformer2DModel",
+ "_diffusers_version": "0.35.1",
+ "axes_dim_rope": [
+ 40,
+ 40,
+ 40
+ ],
+ "axes_lens": [
+ 10000,
+ 10000,
+ 10000
+ ],
+ "ffn_dim_multiplier": null,
+ "hidden_size": 2520,
+ "in_channels": 16,
+ "max_ref_images": 10,
+ "multiple_of": 256,
+ "norm_eps": 1e-05,
+ "num_attention_heads": 21,
+ "num_kv_heads": 7,
+ "num_layers": 32,
+ "num_refiner_layers": 2,
+ "out_channels": null,
+ "patch_size": 2,
+ "text_feat_dim": 2048,
+ "timestep_scale": 1000.0
+ }
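This config describes a 32-layer transformer with grouped-query attention (21 query heads sharing 7 KV heads) and a 3-axis rotary embedding whose per-axis dims sum to the head dimension (2520 / 21 = 120 = 40 + 40 + 40). A minimal sanity check of those derived quantities, assuming a local checkout; instantiating `OmniGen2Transformer2DModel` itself requires the OmniGen2 codebase and is not shown:

```python
# Sanity-check derived quantities implied by transformer/config.json.
import json

with open("transformer/config.json") as f:  # assumes a local checkout of this repo
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]   # 2520 // 21 = 120
assert head_dim == sum(cfg["axes_dim_rope"])                  # 40 + 40 + 40 = 120
assert cfg["num_attention_heads"] % cfg["num_kv_heads"] == 0  # GQA: 3 query heads per KV head
print(f"{cfg['num_layers']} layers, head_dim={head_dim}, "
      f"GQA groups={cfg['num_attention_heads'] // cfg['num_kv_heads']}")
```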
transformer/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa1206f502268c6be56b0958eadc94027fd5dff78a29d33d18a581db3bd45319
+ size 7934409984
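The large `.safetensors` files are stored as Git LFS pointers: the three lines record the spec version, the SHA-256 object id, and the byte size (about 7.9 GB here). A minimal sketch for verifying a downloaded file against the pointer shown above (the local path is an assumption):

```python
# Verify a downloaded weight file against its Git LFS pointer (oid sha256 + size).
import hashlib
import os

path = "transformer/diffusion_pytorch_model.safetensors"  # hypothetical local file
expected_oid = "aa1206f502268c6be56b0958eadc94027fd5dff78a29d33d18a581db3bd45319"
expected_size = 7934409984

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size
assert h.hexdigest() == expected_oid
print("transformer weights match the LFS pointer")
```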
vae/config.json ADDED
@@ -0,0 +1,38 @@
+ {
+ "_class_name": "AutoencoderKL",
+ "_diffusers_version": "0.33.1",
+ "_name_or_path": "/share_2/luoxin/modelscope/hub/models/FLUX.1-dev",
+ "act_fn": "silu",
+ "block_out_channels": [
+ 128,
+ 256,
+ 512,
+ 512
+ ],
+ "down_block_types": [
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D"
+ ],
+ "force_upcast": true,
+ "in_channels": 3,
+ "latent_channels": 16,
+ "latents_mean": null,
+ "latents_std": null,
+ "layers_per_block": 2,
+ "mid_block_add_attention": true,
+ "norm_num_groups": 32,
+ "out_channels": 3,
+ "sample_size": 1024,
+ "scaling_factor": 0.3611,
+ "shift_factor": 0.1159,
+ "up_block_types": [
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D"
+ ],
+ "use_post_quant_conv": false,
+ "use_quant_conv": false
+ }
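This is a FLUX.1-dev style `AutoencoderKL`: 16 latent channels and 8x spatial downsampling from the four encoder blocks, with `scaling_factor` and `shift_factor` applied to the latents. A minimal encode/decode sketch with diffusers, assuming a local checkout of this repo as the model path and the shift/scale convention used by FLUX-style pipelines:

```python
# Encode an image to scaled latents and decode back with the VAE configured above.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(".", subfolder="vae", torch_dtype=torch.float32)

image = torch.rand(1, 3, 1024, 1024) * 2 - 1            # dummy image in [-1, 1]
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()     # (1, 16, 128, 128): 8x downsampling
    latents = (latents - vae.config.shift_factor) * vae.config.scaling_factor
    # ... diffusion operates in this scaled latent space ...
    decoded = vae.decode(latents / vae.config.scaling_factor + vae.config.shift_factor).sample
```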
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c717328c8ad41faab2ccfd52ae17332505c6833cf176aad56e7b58f2c4d4c94
+ size 335306212