John6666 committed (verified)
Commit 3a9c7c3 · 1 Parent(s): 38b59d3

Upload 9 files
README.md CHANGED
@@ -1,12 +1,13 @@
- ---
- title: Test57
- emoji: 📚
- colorFrom: purple
- colorTo: blue
- sdk: gradio
- sdk_version: 4.41.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: test
+ emoji: 🧨
+ colorFrom: yellow
+ colorTo: red
+ sdk: gradio
+ sdk_version: 4.38.1
+ app_file: app.py
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,69 @@
+ import gradio as gr
+ from merge_gr import gen_repo_list, upload_repo_list, clear_repo_list, process_repos_gr
+
+ css = """"""
+
+ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
+ gr.Markdown("# SDXL/SD1.5 DARE Merger (experiment)")
+ gr.Markdown(
+ f"""
+ This Space is a mod version of [martyn](https://huggingface.co/martyn)'s [safetensors-merge-supermario](https://github.com/martyn/safetensors-merge-supermario), adapted to be compatible with Diffusers.
+ Since this Space is completely experimental and unfinished,
+ I recommend using [ComfyUI-DareMerge](https://github.com/54rt1n/ComfyUI-DareMerge)
+ or [WebUI SuperMerger](https://github.com/hako-mikan/sd-webui-supermerger) for actual merging.
+ Most safetensors models with the same structure can probably be merged even if they are not SD models, but this is untested.<br>
+ **⚠️IMPORTANT NOTICE⚠️**<br>
+ From an information security standpoint, it is dangerous to expose your access token or key to others.
+ If you do use this Space, I recommend duplicating it to your own account before running.
+ In your own Space, keys and tokens can be stored as Secrets (e.g. HF_TOKEN),
+ which also saves you the trouble of typing them in.<br>
+ <br>
+ **The steps are as follows**:
+ - Paste a write-access token from [hf.co/settings/tokens](https://huggingface.co/settings/tokens).
+ - Input a model download URL from the Hub.
+ - Input your HF user ID, e.g. 'yourid'.
+ - Input your new merged repo name.
+ - Input the information for merging models.
+ - Set the parameters. If not sure, just use the defaults.
+ - Click "Submit".
+ - Wait patiently until the output changes. It takes approximately 5~6 minutes (downloading from HF).
+ """
+ )
+ with gr.Column():
+ with gr.Group():
+ hf_token = gr.Textbox(label="Your HF write token", placeholder="hf_...", value="", max_lines=1)
+ with gr.Row():
+ hf_user = gr.Textbox(label="Your HF user ID", placeholder="username", value="", max_lines=1)
+ hf_repo = gr.Textbox(label="New repo name", placeholder="reponame", value="", max_lines=1)
+ with gr.Group():
+ with gr.Accordion("YAML", open=True):
+ merge_yaml_input = gr.Textbox(label="List of Repos or URLs to merge", placeholder="author/repo\nauthor/repo\n...", value="", lines=4)
+ merge_yaml_md = gr.Markdown()
+ merge_yaml_upload = gr.UploadButton(label="Upload YAML file", file_types=["text"])
+ merge_yaml_clear = gr.Button("Clear YAML files")
+ with gr.Row():
+ merge_p = gr.Number(label="Default dropout probability", value=0.5, minimum=0, maximum=1.0, step=0.01)
+ merge_lambda = gr.Number(label="Default scaling factor for the weight delta", value=1.0, minimum=0, maximum=2.0, step=0.01)
+ merge_mode = gr.Radio(label="Mode", choices=["SDXL", "SD1.5", "Single files"], value="SDXL")
+ merge_is_upload_sf = gr.Checkbox(label="Convert Diffusers files to single safetensors file", value=False)
+ merge_skip = gr.CheckboxGroup(label="Skip Diffusers folders", choices=["vae", "text_encoder", "text_encoder_2", "text_encoder_3"], value=["vae", "text_encoder"])
+ merge_is_upload = gr.Checkbox(label="Upload files into new Repo", value=True)
+ merge_repo_exists_ok = gr.Checkbox(label="Overwrite existing Repo", value=False)
+ run_button = gr.Button(value="Submit")
+ repo_urls = gr.CheckboxGroup(visible=False, choices=[], value=None)
+ output_md = gr.Markdown(label="Output")
+ merge_files = gr.Files(label="Download", interactive=False, value=[])
+
+ merge_yaml_input.change(gen_repo_list, [merge_yaml_input, merge_p, merge_lambda], [merge_yaml_md], queue=False)
+ merge_yaml_upload.upload(upload_repo_list, [merge_yaml_upload, merge_p, merge_lambda], [merge_yaml_md], queue=False)
+ merge_yaml_clear.click(clear_repo_list, None, [merge_yaml_input, merge_yaml_md], queue=False)
+ gr.on(
+ triggers=[run_button.click],
+ fn=process_repos_gr,
+ inputs=[merge_mode, merge_p, merge_lambda, merge_skip, hf_user, hf_repo, hf_token,
+ merge_is_upload, merge_is_upload_sf, merge_repo_exists_ok, merge_files, repo_urls],
+ outputs=[merge_files, repo_urls, output_md],
+ )
+
+ demo.queue()
+ demo.launch()
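The "Default dropout probability" (p) and "Default scaling factor for the weight delta" (lambda) controls above are the two knobs of the DARE (drop-and-rescale) merge performed by the upstream safetensors-merge-supermario code. A minimal sketch of the per-tensor update under the standard DARE formulation; the actual merge module used by this Space may differ in details:

import torch

def dare_merge_tensor(base: torch.Tensor, other: torch.Tensor, p: float = 0.5, lambda_val: float = 1.0) -> torch.Tensor:
    # Weight delta between the model being merged in and the base model.
    delta = other - base
    # Randomly drop a fraction p of the delta entries, then rescale the survivors by 1 / (1 - p).
    keep_mask = torch.bernoulli(torch.full_like(delta, 1.0 - p))
    rescaled_delta = keep_mask * delta / (1.0 - p)
    # Scale the surviving delta by lambda and add it back onto the base weights.
    return base + lambda_val * rescaled_delta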
convert_repo_to_safetensors_gr.py ADDED
@@ -0,0 +1,399 @@
1
+ # Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
2
+ # *Only* converts the UNet, VAE, and Text Encoder.
3
+ # Does not convert optimizer state or any other thing.
4
+
5
+ import argparse
6
+ import os.path as osp
7
+ import re
8
+
9
+ import torch
10
+ from safetensors.torch import load_file, save_file
11
+ import gradio as gr
12
+
13
+ # =================#
14
+ # UNet Conversion #
15
+ # =================#
16
+
17
+ unet_conversion_map = [
18
+ # (stable-diffusion, HF Diffusers)
19
+ ("time_embed.0.weight", "time_embedding.linear_1.weight"),
20
+ ("time_embed.0.bias", "time_embedding.linear_1.bias"),
21
+ ("time_embed.2.weight", "time_embedding.linear_2.weight"),
22
+ ("time_embed.2.bias", "time_embedding.linear_2.bias"),
23
+ ("input_blocks.0.0.weight", "conv_in.weight"),
24
+ ("input_blocks.0.0.bias", "conv_in.bias"),
25
+ ("out.0.weight", "conv_norm_out.weight"),
26
+ ("out.0.bias", "conv_norm_out.bias"),
27
+ ("out.2.weight", "conv_out.weight"),
28
+ ("out.2.bias", "conv_out.bias"),
29
+ # the following are for sdxl
30
+ ("label_emb.0.0.weight", "add_embedding.linear_1.weight"),
31
+ ("label_emb.0.0.bias", "add_embedding.linear_1.bias"),
32
+ ("label_emb.0.2.weight", "add_embedding.linear_2.weight"),
33
+ ("label_emb.0.2.bias", "add_embedding.linear_2.bias"),
34
+ ]
35
+
36
+ unet_conversion_map_resnet = [
37
+ # (stable-diffusion, HF Diffusers)
38
+ ("in_layers.0", "norm1"),
39
+ ("in_layers.2", "conv1"),
40
+ ("out_layers.0", "norm2"),
41
+ ("out_layers.3", "conv2"),
42
+ ("emb_layers.1", "time_emb_proj"),
43
+ ("skip_connection", "conv_shortcut"),
44
+ ]
45
+
46
+ unet_conversion_map_layer = []
47
+ # hardcoded number of downblocks and resnets/attentions...
48
+ # would need smarter logic for other networks.
49
+ for i in range(3):
50
+ # loop over downblocks/upblocks
51
+
52
+ for j in range(2):
53
+ # loop over resnets/attentions for downblocks
54
+ hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
55
+ sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
56
+ unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
57
+
58
+ if i > 0:
59
+ hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
60
+ sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
61
+ unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
62
+
63
+ for j in range(4):
64
+ # loop over resnets/attentions for upblocks
65
+ hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
66
+ sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
67
+ unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
68
+
69
+ if i < 2:
70
+ # no attention layers in up_blocks.0
71
+ hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
72
+ sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1."
73
+ unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
74
+
75
+ if i < 3:
76
+ # no downsample in down_blocks.3
77
+ hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
78
+ sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
79
+ unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
80
+
81
+ # no upsample in up_blocks.3
82
+ hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
83
+ sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
84
+ unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
85
+ unet_conversion_map_layer.append(("output_blocks.2.2.conv.", "output_blocks.2.1.conv."))
86
+
87
+ hf_mid_atn_prefix = "mid_block.attentions.0."
88
+ sd_mid_atn_prefix = "middle_block.1."
89
+ unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
90
+ for j in range(2):
91
+ hf_mid_res_prefix = f"mid_block.resnets.{j}."
92
+ sd_mid_res_prefix = f"middle_block.{2*j}."
93
+ unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
94
+
95
+
96
+ def convert_unet_state_dict(unet_state_dict):
97
+ # buyer beware: this is a *brittle* function,
98
+ # and correct output requires that all of these pieces interact in
99
+ # the exact order in which I have arranged them.
100
+ mapping = {k: k for k in unet_state_dict.keys()}
101
+ for sd_name, hf_name in unet_conversion_map:
102
+ mapping[hf_name] = sd_name
103
+ for k, v in mapping.items():
104
+ if "resnets" in k:
105
+ for sd_part, hf_part in unet_conversion_map_resnet:
106
+ v = v.replace(hf_part, sd_part)
107
+ mapping[k] = v
108
+ for k, v in mapping.items():
109
+ for sd_part, hf_part in unet_conversion_map_layer:
110
+ v = v.replace(hf_part, sd_part)
111
+ mapping[k] = v
112
+ new_state_dict = {sd_name: unet_state_dict[hf_name] for hf_name, sd_name in mapping.items()}
113
+ return new_state_dict
114
+
115
+
116
+ # ================#
117
+ # VAE Conversion #
118
+ # ================#
119
+
120
+ vae_conversion_map = [
121
+ # (stable-diffusion, HF Diffusers)
122
+ ("nin_shortcut", "conv_shortcut"),
123
+ ("norm_out", "conv_norm_out"),
124
+ ("mid.attn_1.", "mid_block.attentions.0."),
125
+ ]
126
+
127
+ for i in range(4):
128
+ # down_blocks have two resnets
129
+ for j in range(2):
130
+ hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
131
+ sd_down_prefix = f"encoder.down.{i}.block.{j}."
132
+ vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
133
+
134
+ if i < 3:
135
+ hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
136
+ sd_downsample_prefix = f"down.{i}.downsample."
137
+ vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
138
+
139
+ hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
140
+ sd_upsample_prefix = f"up.{3-i}.upsample."
141
+ vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
142
+
143
+ # up_blocks have three resnets
144
+ # also, up blocks in hf are numbered in reverse from sd
145
+ for j in range(3):
146
+ hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
147
+ sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
148
+ vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
149
+
150
+ # this part accounts for mid blocks in both the encoder and the decoder
151
+ for i in range(2):
152
+ hf_mid_res_prefix = f"mid_block.resnets.{i}."
153
+ sd_mid_res_prefix = f"mid.block_{i+1}."
154
+ vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
155
+
156
+
157
+ vae_conversion_map_attn = [
158
+ # (stable-diffusion, HF Diffusers)
159
+ ("norm.", "group_norm."),
160
+ # the following are for SDXL
161
+ ("q.", "to_q."),
162
+ ("k.", "to_k."),
163
+ ("v.", "to_v."),
164
+ ("proj_out.", "to_out.0."),
165
+ ]
166
+
167
+
168
+ def reshape_weight_for_sd(w):
169
+ # convert HF linear weights to SD conv2d weights
170
+ if not w.ndim == 1:
171
+ return w.reshape(*w.shape, 1, 1)
172
+ else:
173
+ return w
174
+
175
+
176
+ def convert_vae_state_dict(vae_state_dict):
177
+ mapping = {k: k for k in vae_state_dict.keys()}
178
+ for k, v in mapping.items():
179
+ for sd_part, hf_part in vae_conversion_map:
180
+ v = v.replace(hf_part, sd_part)
181
+ mapping[k] = v
182
+ for k, v in mapping.items():
183
+ if "attentions" in k:
184
+ for sd_part, hf_part in vae_conversion_map_attn:
185
+ v = v.replace(hf_part, sd_part)
186
+ mapping[k] = v
187
+ new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
188
+ weights_to_convert = ["q", "k", "v", "proj_out"]
189
+ for k, v in new_state_dict.items():
190
+ for weight_name in weights_to_convert:
191
+ if f"mid.attn_1.{weight_name}.weight" in k:
192
+ print(f"Reshaping {k} for SD format")
193
+ new_state_dict[k] = reshape_weight_for_sd(v)
194
+ return new_state_dict
195
+
196
+
197
+ # =========================#
198
+ # Text Encoder Conversion #
199
+ # =========================#
200
+
201
+
202
+ textenc_conversion_lst = [
203
+ # (stable-diffusion, HF Diffusers)
204
+ ("transformer.resblocks.", "text_model.encoder.layers."),
205
+ ("ln_1", "layer_norm1"),
206
+ ("ln_2", "layer_norm2"),
207
+ (".c_fc.", ".fc1."),
208
+ (".c_proj.", ".fc2."),
209
+ (".attn", ".self_attn"),
210
+ ("ln_final.", "text_model.final_layer_norm."),
211
+ ("token_embedding.weight", "text_model.embeddings.token_embedding.weight"),
212
+ ("positional_embedding", "text_model.embeddings.position_embedding.weight"),
213
+ ]
214
+ protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
215
+ textenc_pattern = re.compile("|".join(protected.keys()))
216
+
217
+ # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
218
+ code2idx = {"q": 0, "k": 1, "v": 2}
219
+
220
+
221
+ def convert_openclip_text_enc_state_dict(text_enc_dict):
222
+ new_state_dict = {}
223
+ capture_qkv_weight = {}
224
+ capture_qkv_bias = {}
225
+ for k, v in text_enc_dict.items():
226
+ if (
227
+ k.endswith(".self_attn.q_proj.weight")
228
+ or k.endswith(".self_attn.k_proj.weight")
229
+ or k.endswith(".self_attn.v_proj.weight")
230
+ ):
231
+ k_pre = k[: -len(".q_proj.weight")]
232
+ k_code = k[-len("q_proj.weight")]
233
+ if k_pre not in capture_qkv_weight:
234
+ capture_qkv_weight[k_pre] = [None, None, None]
235
+ capture_qkv_weight[k_pre][code2idx[k_code]] = v
236
+ continue
237
+
238
+ if (
239
+ k.endswith(".self_attn.q_proj.bias")
240
+ or k.endswith(".self_attn.k_proj.bias")
241
+ or k.endswith(".self_attn.v_proj.bias")
242
+ ):
243
+ k_pre = k[: -len(".q_proj.bias")]
244
+ k_code = k[-len("q_proj.bias")]
245
+ if k_pre not in capture_qkv_bias:
246
+ capture_qkv_bias[k_pre] = [None, None, None]
247
+ capture_qkv_bias[k_pre][code2idx[k_code]] = v
248
+ continue
249
+
250
+ relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
251
+ new_state_dict[relabelled_key] = v
252
+
253
+ for k_pre, tensors in capture_qkv_weight.items():
254
+ if None in tensors:
255
+ raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
256
+ relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
257
+ new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
258
+
259
+ for k_pre, tensors in capture_qkv_bias.items():
260
+ if None in tensors:
261
+ raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
262
+ relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
263
+ new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
264
+
265
+ return new_state_dict
266
+
267
+
268
+ def convert_openai_text_enc_state_dict(text_enc_dict):
269
+ return text_enc_dict
270
+
271
+
272
+ def convert_diffusers_to_safetensors(model_path, checkpoint_path, half = True, progress=gr.Progress(track_tqdm=True)):
273
+ progress(0, desc="Start converting...")
274
+ # Path for safetensors
275
+ unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.safetensors")
276
+ vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.safetensors")
277
+ text_enc_path = osp.join(model_path, "text_encoder", "model.safetensors")
278
+ text_enc_2_path = osp.join(model_path, "text_encoder_2", "model.safetensors")
279
+
280
+ # Load models from safetensors if it exists, if it doesn't pytorch
281
+ if osp.exists(unet_path):
282
+ unet_state_dict = load_file(unet_path, device="cpu")
283
+ else:
284
+ unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin")
285
+ unet_state_dict = torch.load(unet_path, map_location="cpu")
286
+
287
+ if osp.exists(vae_path):
288
+ vae_state_dict = load_file(vae_path, device="cpu")
289
+ else:
290
+ vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin")
291
+ vae_state_dict = torch.load(vae_path, map_location="cpu")
292
+
293
+ if osp.exists(text_enc_path):
294
+ text_enc_dict = load_file(text_enc_path, device="cpu")
295
+ else:
296
+ text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin")
297
+ text_enc_dict = torch.load(text_enc_path, map_location="cpu")
298
+
299
+ if osp.exists(text_enc_2_path):
300
+ text_enc_2_dict = load_file(text_enc_2_path, device="cpu")
301
+ else:
302
+ text_enc_2_path = osp.join(model_path, "text_encoder_2", "pytorch_model.bin")
303
+ text_enc_2_dict = torch.load(text_enc_2_path, map_location="cpu")
304
+
305
+ # Convert the UNet model
306
+ unet_state_dict = convert_unet_state_dict(unet_state_dict)
307
+ unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
308
+
309
+ # Convert the VAE model
310
+ vae_state_dict = convert_vae_state_dict(vae_state_dict)
311
+ vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
312
+
313
+ # Convert text encoder 1
314
+ text_enc_dict = convert_openai_text_enc_state_dict(text_enc_dict)
315
+ text_enc_dict = {"conditioner.embedders.0.transformer." + k: v for k, v in text_enc_dict.items()}
316
+
317
+ # Convert text encoder 2
318
+ text_enc_2_dict = convert_openclip_text_enc_state_dict(text_enc_2_dict)
319
+ text_enc_2_dict = {"conditioner.embedders.1.model." + k: v for k, v in text_enc_2_dict.items()}
320
+ # We call the `.T.contiguous()` to match what's done in
321
+ # https://github.com/huggingface/diffusers/blob/84905ca7287876b925b6bf8e9bb92fec21c78764/src/diffusers/loaders/single_file_utils.py#L1085
322
+ text_enc_2_dict["conditioner.embedders.1.model.text_projection"] = text_enc_2_dict.pop(
323
+ "conditioner.embedders.1.model.text_projection.weight"
324
+ ).T.contiguous()
325
+
326
+ # Put together new checkpoint
327
+ state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict, **text_enc_2_dict}
328
+
329
+ if half:
330
+ state_dict = {k: v.half() for k, v in state_dict.items()}
331
+
332
+ save_file(state_dict, checkpoint_path)
333
+ progress(1, desc="Converted.")
334
+
335
+
336
+ def download_repo(repo_id, dir_path, progress=gr.Progress(track_tqdm=True)):
337
+ from huggingface_hub import snapshot_download
338
+ try:
339
+ snapshot_download(repo_id=repo_id, local_dir=dir_path)
340
+ except Exception as e:
341
+ print(f"Error: Failed to download {repo_id}. ")
342
+ return
343
+
344
+
345
+ def upload_safetensors_to_repo(filename, progress=gr.Progress(track_tqdm=True)):
346
+ from huggingface_hub import HfApi, hf_hub_url
347
+ import os
348
+ from pathlib import Path
349
+ output_filename = Path(filename).name
350
+ hf_token = os.environ.get("HF_TOKEN")
351
+ repo_id = os.environ.get("HF_OUTPUT_REPO")
352
+ api = HfApi()
353
+ try:
354
+ progress(0, desc="Start uploading...")
355
+ api.upload_file(path_or_fileobj=filename, path_in_repo=output_filename, repo_id=repo_id, token=hf_token)
356
+ progress(1, desc="Uploaded.")
357
+ url = hf_hub_url(repo_id=repo_id, filename=output_filename)
358
+ except Exception as e:
359
+ print(f"Error: Failed to upload to {repo_id}. ")
360
+ return None
361
+ return url
362
+
363
+
364
+ def convert_repo_to_safetensors(repo_id, half=True, progress=gr.Progress(track_tqdm=True)):
365
+ download_dir = f"{repo_id.split('/')[0]}_{repo_id.split('/')[-1]}"
366
+ output_filename = f"{repo_id.split('/')[0]}_{repo_id.split('/')[-1]}.safetensors"
367
+ download_repo(repo_id, download_dir)
368
+ convert_diffusers_to_safetensors(download_dir, output_filename, half)
369
+ return output_filename
370
+
371
+
372
+ def convert_repo_to_safetensors_multi(repo_id, files, is_upload, urls, half=True, progress=gr.Progress(track_tqdm=True)):
373
+ file = convert_repo_to_safetensors(repo_id, half)
374
+ if not urls: urls = []
375
+ url = ""
376
+ if is_upload:
377
+ url = upload_safetensors_to_repo(file)
378
+ if url: urls.append(url)
379
+ md = ""
380
+ for u in urls:
381
+ md += f"[Download {str(u).split('/')[-1]}]({str(u)})<br>"
382
+ if not files: files = []
383
+ files.append(file)
384
+ return gr.update(value=files), gr.update(value=urls, choices=urls), gr.update(value=md)
385
+
386
+
387
+ if __name__ == "__main__":
388
+ parser = argparse.ArgumentParser()
389
+
390
+ parser.add_argument("--repo_id", default=None, type=str, required=True, help="HF Repo ID of the model to convert.")
391
+ parser.add_argument("--half", default=True, help="Save weights in half precision.")
392
+
393
+ args = parser.parse_args()
394
+ assert args.repo_id is not None, "Must provide a Repo ID!"
395
+
396
+ convert_repo_to_safetensors(args.repo_id, args.half)
397
+
398
+
399
+ # Usage: python convert_repo_to_safetensors.py --repo_id GraydientPlatformAPI/goodfit-pony41-xl
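For reference, the converter defined above can also be called from Python instead of the CLI. A small sketch, assuming the module is importable under its file name; the repo ID is just the example from the usage comment:

from convert_repo_to_safetensors_gr import convert_repo_to_safetensors

# Downloads the Diffusers repo into a local folder and writes <owner>_<repo>.safetensors
# in the working directory, returning the output filename.
output_file = convert_repo_to_safetensors("GraydientPlatformAPI/goodfit-pony41-xl", half=True)
print(output_file)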
convert_repo_to_safetensors_sd_gr.py ADDED
@@ -0,0 +1,401 @@
1
+ # Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
2
+ # *Only* converts the UNet, VAE, and Text Encoder.
3
+ # Does not convert optimizer state or any other thing.
4
+
5
+ import argparse
6
+ import os.path as osp
7
+ import re
8
+
9
+ import torch
10
+ from safetensors.torch import load_file, save_file
11
+ import gradio as gr
12
+
13
+
14
+ # =================#
15
+ # UNet Conversion #
16
+ # =================#
17
+
18
+ unet_conversion_map = [
19
+ # (stable-diffusion, HF Diffusers)
20
+ ("time_embed.0.weight", "time_embedding.linear_1.weight"),
21
+ ("time_embed.0.bias", "time_embedding.linear_1.bias"),
22
+ ("time_embed.2.weight", "time_embedding.linear_2.weight"),
23
+ ("time_embed.2.bias", "time_embedding.linear_2.bias"),
24
+ ("input_blocks.0.0.weight", "conv_in.weight"),
25
+ ("input_blocks.0.0.bias", "conv_in.bias"),
26
+ ("out.0.weight", "conv_norm_out.weight"),
27
+ ("out.0.bias", "conv_norm_out.bias"),
28
+ ("out.2.weight", "conv_out.weight"),
29
+ ("out.2.bias", "conv_out.bias"),
30
+ ]
31
+
32
+ unet_conversion_map_resnet = [
33
+ # (stable-diffusion, HF Diffusers)
34
+ ("in_layers.0", "norm1"),
35
+ ("in_layers.2", "conv1"),
36
+ ("out_layers.0", "norm2"),
37
+ ("out_layers.3", "conv2"),
38
+ ("emb_layers.1", "time_emb_proj"),
39
+ ("skip_connection", "conv_shortcut"),
40
+ ]
41
+
42
+ unet_conversion_map_layer = []
43
+ # hardcoded number of downblocks and resnets/attentions...
44
+ # would need smarter logic for other networks.
45
+ for i in range(4):
46
+ # loop over downblocks/upblocks
47
+
48
+ for j in range(2):
49
+ # loop over resnets/attentions for downblocks
50
+ hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
51
+ sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
52
+ unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
53
+
54
+ if i < 3:
55
+ # no attention layers in down_blocks.3
56
+ hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
57
+ sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
58
+ unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
59
+
60
+ for j in range(3):
61
+ # loop over resnets/attentions for upblocks
62
+ hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
63
+ sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
64
+ unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
65
+
66
+ if i > 0:
67
+ # no attention layers in up_blocks.0
68
+ hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
69
+ sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
70
+ unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
71
+
72
+ if i < 3:
73
+ # no downsample in down_blocks.3
74
+ hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
75
+ sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
76
+ unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
77
+
78
+ # no upsample in up_blocks.3
79
+ hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
80
+ sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
81
+ unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
82
+
83
+ hf_mid_atn_prefix = "mid_block.attentions.0."
84
+ sd_mid_atn_prefix = "middle_block.1."
85
+ unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
86
+
87
+ for j in range(2):
88
+ hf_mid_res_prefix = f"mid_block.resnets.{j}."
89
+ sd_mid_res_prefix = f"middle_block.{2*j}."
90
+ unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
91
+
92
+
93
+ def convert_unet_state_dict(unet_state_dict):
94
+ # buyer beware: this is a *brittle* function,
95
+ # and correct output requires that all of these pieces interact in
96
+ # the exact order in which I have arranged them.
97
+ mapping = {k: k for k in unet_state_dict.keys()}
98
+ for sd_name, hf_name in unet_conversion_map:
99
+ mapping[hf_name] = sd_name
100
+ for k, v in mapping.items():
101
+ if "resnets" in k:
102
+ for sd_part, hf_part in unet_conversion_map_resnet:
103
+ v = v.replace(hf_part, sd_part)
104
+ mapping[k] = v
105
+ for k, v in mapping.items():
106
+ for sd_part, hf_part in unet_conversion_map_layer:
107
+ v = v.replace(hf_part, sd_part)
108
+ mapping[k] = v
109
+ new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
110
+ return new_state_dict
111
+
112
+
113
+ # ================#
114
+ # VAE Conversion #
115
+ # ================#
116
+
117
+ vae_conversion_map = [
118
+ # (stable-diffusion, HF Diffusers)
119
+ ("nin_shortcut", "conv_shortcut"),
120
+ ("norm_out", "conv_norm_out"),
121
+ ("mid.attn_1.", "mid_block.attentions.0."),
122
+ ]
123
+
124
+ for i in range(4):
125
+ # down_blocks have two resnets
126
+ for j in range(2):
127
+ hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
128
+ sd_down_prefix = f"encoder.down.{i}.block.{j}."
129
+ vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
130
+
131
+ if i < 3:
132
+ hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
133
+ sd_downsample_prefix = f"down.{i}.downsample."
134
+ vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
135
+
136
+ hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
137
+ sd_upsample_prefix = f"up.{3-i}.upsample."
138
+ vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
139
+
140
+ # up_blocks have three resnets
141
+ # also, up blocks in hf are numbered in reverse from sd
142
+ for j in range(3):
143
+ hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
144
+ sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
145
+ vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
146
+
147
+ # this part accounts for mid blocks in both the encoder and the decoder
148
+ for i in range(2):
149
+ hf_mid_res_prefix = f"mid_block.resnets.{i}."
150
+ sd_mid_res_prefix = f"mid.block_{i+1}."
151
+ vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
152
+
153
+
154
+ vae_conversion_map_attn = [
155
+ # (stable-diffusion, HF Diffusers)
156
+ ("norm.", "group_norm."),
157
+ ("q.", "query."),
158
+ ("k.", "key."),
159
+ ("v.", "value."),
160
+ ("proj_out.", "proj_attn."),
161
+ ]
162
+
163
+ # This is probably not the most ideal solution, but it does work.
164
+ vae_extra_conversion_map = [
165
+ ("to_q", "q"),
166
+ ("to_k", "k"),
167
+ ("to_v", "v"),
168
+ ("to_out.0", "proj_out"),
169
+ ]
170
+
171
+
172
+ def reshape_weight_for_sd(w):
173
+ # convert HF linear weights to SD conv2d weights
174
+ if not w.ndim == 1:
175
+ return w.reshape(*w.shape, 1, 1)
176
+ else:
177
+ return w
178
+
179
+
180
+ def convert_vae_state_dict(vae_state_dict):
181
+ mapping = {k: k for k in vae_state_dict.keys()}
182
+ for k, v in mapping.items():
183
+ for sd_part, hf_part in vae_conversion_map:
184
+ v = v.replace(hf_part, sd_part)
185
+ mapping[k] = v
186
+ for k, v in mapping.items():
187
+ if "attentions" in k:
188
+ for sd_part, hf_part in vae_conversion_map_attn:
189
+ v = v.replace(hf_part, sd_part)
190
+ mapping[k] = v
191
+ new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
192
+ weights_to_convert = ["q", "k", "v", "proj_out"]
193
+ keys_to_rename = {}
194
+ for k, v in new_state_dict.items():
195
+ for weight_name in weights_to_convert:
196
+ if f"mid.attn_1.{weight_name}.weight" in k:
197
+ print(f"Reshaping {k} for SD format")
198
+ new_state_dict[k] = reshape_weight_for_sd(v)
199
+ for weight_name, real_weight_name in vae_extra_conversion_map:
200
+ if f"mid.attn_1.{weight_name}.weight" in k or f"mid.attn_1.{weight_name}.bias" in k:
201
+ keys_to_rename[k] = k.replace(weight_name, real_weight_name)
202
+ for k, v in keys_to_rename.items():
203
+ if k in new_state_dict:
204
+ print(f"Renaming {k} to {v}")
205
+ new_state_dict[v] = reshape_weight_for_sd(new_state_dict[k])
206
+ del new_state_dict[k]
207
+ return new_state_dict
208
+
209
+
210
+ # =========================#
211
+ # Text Encoder Conversion #
212
+ # =========================#
213
+
214
+
215
+ textenc_conversion_lst = [
216
+ # (stable-diffusion, HF Diffusers)
217
+ ("resblocks.", "text_model.encoder.layers."),
218
+ ("ln_1", "layer_norm1"),
219
+ ("ln_2", "layer_norm2"),
220
+ (".c_fc.", ".fc1."),
221
+ (".c_proj.", ".fc2."),
222
+ (".attn", ".self_attn"),
223
+ ("ln_final.", "transformer.text_model.final_layer_norm."),
224
+ ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
225
+ ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
226
+ ]
227
+ protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
228
+ textenc_pattern = re.compile("|".join(protected.keys()))
229
+
230
+ # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
231
+ code2idx = {"q": 0, "k": 1, "v": 2}
232
+
233
+
234
+ def convert_text_enc_state_dict_v20(text_enc_dict):
235
+ new_state_dict = {}
236
+ capture_qkv_weight = {}
237
+ capture_qkv_bias = {}
238
+ for k, v in text_enc_dict.items():
239
+ if (
240
+ k.endswith(".self_attn.q_proj.weight")
241
+ or k.endswith(".self_attn.k_proj.weight")
242
+ or k.endswith(".self_attn.v_proj.weight")
243
+ ):
244
+ k_pre = k[: -len(".q_proj.weight")]
245
+ k_code = k[-len("q_proj.weight")]
246
+ if k_pre not in capture_qkv_weight:
247
+ capture_qkv_weight[k_pre] = [None, None, None]
248
+ capture_qkv_weight[k_pre][code2idx[k_code]] = v
249
+ continue
250
+
251
+ if (
252
+ k.endswith(".self_attn.q_proj.bias")
253
+ or k.endswith(".self_attn.k_proj.bias")
254
+ or k.endswith(".self_attn.v_proj.bias")
255
+ ):
256
+ k_pre = k[: -len(".q_proj.bias")]
257
+ k_code = k[-len("q_proj.bias")]
258
+ if k_pre not in capture_qkv_bias:
259
+ capture_qkv_bias[k_pre] = [None, None, None]
260
+ capture_qkv_bias[k_pre][code2idx[k_code]] = v
261
+ continue
262
+
263
+ relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
264
+ new_state_dict[relabelled_key] = v
265
+
266
+ for k_pre, tensors in capture_qkv_weight.items():
267
+ if None in tensors:
268
+ raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
269
+ relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
270
+ new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
271
+
272
+ for k_pre, tensors in capture_qkv_bias.items():
273
+ if None in tensors:
274
+ raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
275
+ relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
276
+ new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
277
+
278
+ return new_state_dict
279
+
280
+
281
+ def convert_text_enc_state_dict(text_enc_dict):
282
+ return text_enc_dict
283
+
284
+
285
+ def convert_diffusers_to_safetensors(model_path, checkpoint_path, half = True, progress=gr.Progress(track_tqdm=True)):
286
+ progress(0, desc="Start converting...")
287
+ # Path for safetensors
288
+ unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.safetensors")
289
+ vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.safetensors")
290
+ text_enc_path = osp.join(model_path, "text_encoder", "model.safetensors")
291
+
292
+ # Load models from safetensors if it exists, if it doesn't pytorch
293
+ if osp.exists(unet_path):
294
+ unet_state_dict = load_file(unet_path, device="cpu")
295
+ else:
296
+ unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin")
297
+ unet_state_dict = torch.load(unet_path, map_location="cpu")
298
+
299
+ if osp.exists(vae_path):
300
+ vae_state_dict = load_file(vae_path, device="cpu")
301
+ else:
302
+ vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin")
303
+ vae_state_dict = torch.load(vae_path, map_location="cpu")
304
+
305
+ if osp.exists(text_enc_path):
306
+ text_enc_dict = load_file(text_enc_path, device="cpu")
307
+ else:
308
+ text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin")
309
+ text_enc_dict = torch.load(text_enc_path, map_location="cpu")
310
+
311
+ # Convert the UNet model
312
+ unet_state_dict = convert_unet_state_dict(unet_state_dict)
313
+ unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
314
+
315
+ # Convert the VAE model
316
+ vae_state_dict = convert_vae_state_dict(vae_state_dict)
317
+ vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
318
+
319
+ # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
320
+ is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
321
+
322
+ if is_v20_model:
323
+ # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
324
+ text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
325
+ text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
326
+ text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
327
+ else:
328
+ text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
329
+ text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
330
+
331
+ # Put together new checkpoint
332
+ state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
333
+ if half:
334
+ state_dict = {k: v.half() for k, v in state_dict.items()}
335
+
336
+ save_file(state_dict, checkpoint_path)
337
+
338
+ progress(1, desc="Converted.")
339
+
340
+
341
+ def download_repo(repo_id, dir_path, progress=gr.Progress(track_tqdm=True)):
342
+ from huggingface_hub import snapshot_download
343
+ try:
344
+ snapshot_download(repo_id=repo_id, local_dir=dir_path)
345
+ except Exception as e:
346
+ print(f"Error: Failed to download {repo_id}. ")
347
+ return
348
+
349
+
350
+ def upload_safetensors_to_repo(filename, progress=gr.Progress(track_tqdm=True)):
351
+ from huggingface_hub import HfApi, hf_hub_url
352
+ import os
353
+ from pathlib import Path
354
+ output_filename = Path(filename).name
355
+ hf_token = os.environ.get("HF_TOKEN")
356
+ repo_id = os.environ.get("HF_OUTPUT_REPO")
357
+ api = HfApi()
358
+ try:
359
+ progress(0, desc="Start uploading...")
360
+ api.upload_file(path_or_fileobj=filename, path_in_repo=output_filename, repo_id=repo_id, token=hf_token)
361
+ progress(1, desc="Uploaded.")
362
+ url = hf_hub_url(repo_id=repo_id, filename=output_filename)
363
+ except Exception as e:
364
+ print(f"Error: Failed to upload to {repo_id}. ")
365
+ return None
366
+ return url
367
+
368
+
369
+ def convert_repo_to_safetensors(repo_id, half = True, progress=gr.Progress(track_tqdm=True)):
370
+ download_dir = f"{repo_id.split('/')[0]}_{repo_id.split('/')[-1]}"
371
+ output_filename = f"{repo_id.split('/')[0]}_{repo_id.split('/')[-1]}.safetensors"
372
+ download_repo(repo_id, download_dir)
373
+ convert_diffusers_to_safetensors(download_dir, output_filename, half)
374
+ return output_filename
375
+
376
+
377
+ def convert_repo_to_safetensors_multi_sd(repo_id, files, is_upload, urls, half=True, progress=gr.Progress(track_tqdm=True)):
378
+ file = convert_repo_to_safetensors(repo_id, half)
379
+ if not urls: urls = []
380
+ url = ""
381
+ if is_upload:
382
+ url = upload_safetensors_to_repo(file)
383
+ if url: urls.append(url)
384
+ md = ""
385
+ for u in urls:
386
+ md += f"[Download {str(u).split('/')[-1]}]({str(u)})<br>"
387
+ if not files: files = []
388
+ files.append(file)
389
+ return gr.update(value=files), gr.update(value=urls, choices=urls), gr.update(value=md)
390
+
391
+
392
+ if __name__ == "__main__":
393
+ parser = argparse.ArgumentParser()
394
+
395
+ parser.add_argument("--repo_id", default=None, type=str, required=True, help="HF Repo ID of the model to convert.")
396
+ parser.add_argument("--half", default=True, help="Save weights in half precision.")
397
+
398
+ args = parser.parse_args()
399
+ assert args.repo_id is not None, "Must provide a Repo ID!"
400
+
401
+ convert_repo_to_safetensors(args.repo_id, args.half)
convert_url_to_diffusers_sdxl_gr.py ADDED
@@ -0,0 +1,367 @@
1
+ import argparse
2
+ from pathlib import Path
3
+ import os
4
+ import torch
5
+ from diffusers import StableDiffusionXLPipeline, AutoencoderKL
6
+ import gradio as gr
7
+ # also requires aria2, gdown, peft, huggingface_hub, safetensors, transformers, accelerate, pytorch_lightning
8
+
9
+
10
+ def list_sub(a, b):
11
+ return [e for e in a if e not in b]
12
+
13
+
14
+ def is_repo_name(s):
15
+ import re
16
+ return re.fullmatch(r'^[^/,\s]+?/[^/,\s]+?$', s)
17
+
18
+
19
+ def download_thing(directory, url, civitai_api_key="", progress=gr.Progress(track_tqdm=True)):
20
+ url = url.strip()
21
+ if "drive.google.com" in url:
22
+ original_dir = os.getcwd()
23
+ os.chdir(directory)
24
+ os.system(f"gdown --fuzzy {url}")
25
+ os.chdir(original_dir)
26
+ elif "huggingface.co" in url:
27
+ url = url.replace("?download=true", "")
28
+ if "/blob/" in url:
29
+ url = url.replace("/blob/", "/resolve/")
30
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
31
+ else:
32
+ os.system (f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
33
+ elif "civitai.com" in url:
34
+ if "?" in url:
35
+ url = url.split("?")[0]
36
+ if civitai_api_key:
37
+ url = url + f"?token={civitai_api_key}"
38
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
39
+ else:
40
+ print("You need an API key to download Civitai models.")
41
+ else:
42
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
43
+
44
+
45
+ def get_local_model_list(dir_path):
46
+ model_list = []
47
+ valid_extensions = ('.safetensors',)
48
+ for file in Path(dir_path).glob("*"):
49
+ if file.suffix in valid_extensions:
50
+ file_path = str(Path(f"{dir_path}/{file.name}"))
51
+ model_list.append(file_path)
52
+ return model_list
53
+
54
+
55
+ def get_download_file(temp_dir, url, civitai_key, progress=gr.Progress(track_tqdm=True)):
56
+ if not "http" in url and is_repo_name(url) and not Path(url).exists():
57
+ print(f"Use HF Repo: {url}")
58
+ new_file = url
59
+ elif not "http" in url and Path(url).exists():
60
+ print(f"Use local file: {url}")
61
+ new_file = url
62
+ elif Path(f"{temp_dir}/{url.split('/')[-1]}").exists():
63
+ print(f"File to download already exists: {url}")
64
+ new_file = f"{temp_dir}/{url.split('/')[-1]}"
65
+ else:
66
+ print(f"Start downloading: {url}")
67
+ before = get_local_model_list(temp_dir)
68
+ try:
69
+ download_thing(temp_dir, url.strip(), civitai_key)
70
+ except Exception:
71
+ print(f"Download failed: {url}")
72
+ return ""
73
+ after = get_local_model_list(temp_dir)
74
+ new_file = list_sub(after, before)[0] if list_sub(after, before) else ""
75
+ if not new_file:
76
+ print(f"Download failed: {url}")
77
+ return ""
78
+ print(f"Download completed: {url}")
79
+ return new_file
80
+
81
+
82
+ from diffusers import (
83
+ DPMSolverMultistepScheduler,
84
+ DPMSolverSinglestepScheduler,
85
+ KDPM2DiscreteScheduler,
86
+ EulerDiscreteScheduler,
87
+ EulerAncestralDiscreteScheduler,
88
+ HeunDiscreteScheduler,
89
+ LMSDiscreteScheduler,
90
+ DDIMScheduler,
91
+ DEISMultistepScheduler,
92
+ UniPCMultistepScheduler,
93
+ LCMScheduler,
94
+ PNDMScheduler,
95
+ KDPM2AncestralDiscreteScheduler,
96
+ DPMSolverSDEScheduler,
97
+ EDMDPMSolverMultistepScheduler,
98
+ DDPMScheduler,
99
+ EDMEulerScheduler,
100
+ TCDScheduler,
101
+ )
102
+
103
+
104
+ SCHEDULER_CONFIG_MAP = {
105
+ "DPM++ 2M": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False}),
106
+ "DPM++ 2M Karras": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True}),
107
+ "DPM++ 2M SDE": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "algorithm_type": "sde-dpmsolver++"}),
108
+ "DPM++ 2M SDE Karras": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "algorithm_type": "sde-dpmsolver++"}),
109
+ "DPM++ 2S": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": False}),
110
+ "DPM++ 2S Karras": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True}),
111
+ "DPM++ 1S": (DPMSolverMultistepScheduler, {"solver_order": 1}),
112
+ "DPM++ 1S Karras": (DPMSolverMultistepScheduler, {"solver_order": 1, "use_karras_sigmas": True}),
113
+ "DPM++ 3M": (DPMSolverMultistepScheduler, {"solver_order": 3}),
114
+ "DPM++ 3M Karras": (DPMSolverMultistepScheduler, {"solver_order": 3, "use_karras_sigmas": True}),
115
+ "DPM++ SDE": (DPMSolverSDEScheduler, {"use_karras_sigmas": False}),
116
+ "DPM++ SDE Karras": (DPMSolverSDEScheduler, {"use_karras_sigmas": True}),
117
+ "DPM2": (KDPM2DiscreteScheduler, {}),
118
+ "DPM2 Karras": (KDPM2DiscreteScheduler, {"use_karras_sigmas": True}),
119
+ "DPM2 a": (KDPM2AncestralDiscreteScheduler, {}),
120
+ "DPM2 a Karras": (KDPM2AncestralDiscreteScheduler, {"use_karras_sigmas": True}),
121
+ "Euler": (EulerDiscreteScheduler, {}),
122
+ "Euler a": (EulerAncestralDiscreteScheduler, {}),
123
+ "Euler trailing": (EulerDiscreteScheduler, {"timestep_spacing": "trailing", "prediction_type": "sample"}),
124
+ "Euler a trailing": (EulerAncestralDiscreteScheduler, {"timestep_spacing": "trailing"}),
125
+ "Heun": (HeunDiscreteScheduler, {}),
126
+ "Heun Karras": (HeunDiscreteScheduler, {"use_karras_sigmas": True}),
127
+ "LMS": (LMSDiscreteScheduler, {}),
128
+ "LMS Karras": (LMSDiscreteScheduler, {"use_karras_sigmas": True}),
129
+ "DDIM": (DDIMScheduler, {}),
130
+ "DDIM trailing": (DDIMScheduler, {"timestep_spacing": "trailing"}),
131
+ "DEIS": (DEISMultistepScheduler, {}),
132
+ "UniPC": (UniPCMultistepScheduler, {}),
133
+ "UniPC Karras": (UniPCMultistepScheduler, {"use_karras_sigmas": True}),
134
+ "PNDM": (PNDMScheduler, {}),
135
+ "Euler EDM": (EDMEulerScheduler, {}),
136
+ "Euler EDM Karras": (EDMEulerScheduler, {"use_karras_sigmas": True}),
137
+ "DPM++ 2M EDM": (EDMDPMSolverMultistepScheduler, {"solver_order": 2, "solver_type": "midpoint", "final_sigmas_type": "zero", "algorithm_type": "dpmsolver++"}),
138
+ "DPM++ 2M EDM Karras": (EDMDPMSolverMultistepScheduler, {"use_karras_sigmas": True, "solver_order": 2, "solver_type": "midpoint", "final_sigmas_type": "zero", "algorithm_type": "dpmsolver++"}),
139
+ "DDPM": (DDPMScheduler, {}),
140
+
141
+ "DPM++ 2M Lu": (DPMSolverMultistepScheduler, {"use_lu_lambdas": True}),
142
+ "DPM++ 2M Ef": (DPMSolverMultistepScheduler, {"euler_at_final": True}),
143
+ "DPM++ 2M SDE Lu": (DPMSolverMultistepScheduler, {"use_lu_lambdas": True, "algorithm_type": "sde-dpmsolver++"}),
144
+ "DPM++ 2M SDE Ef": (DPMSolverMultistepScheduler, {"algorithm_type": "sde-dpmsolver++", "euler_at_final": True}),
145
+
146
+ "LCM": (LCMScheduler, {}),
147
+ "TCD": (TCDScheduler, {}),
148
+ "LCM trailing": (LCMScheduler, {"timestep_spacing": "trailing"}),
149
+ "TCD trailing": (TCDScheduler, {"timestep_spacing": "trailing"}),
150
+ "LCM Auto-Loader": (LCMScheduler, {}),
151
+ "TCD Auto-Loader": (TCDScheduler, {}),
152
+ }
153
+
154
+
155
+ def get_scheduler_config(name):
156
+ if name not in SCHEDULER_CONFIG_MAP: return SCHEDULER_CONFIG_MAP["Euler a"]
157
+ return SCHEDULER_CONFIG_MAP[name]
158
+
159
+
160
+ def save_readme_md(dir, url):
161
+ orig_url = ""
162
+ orig_name = ""
163
+ if is_repo_name(url):
164
+ orig_name = url
165
+ orig_url = f"https://huggingface.co/{url}/"
166
+ elif "http" in url:
167
+ orig_name = url
168
+ orig_url = url
169
+ if orig_name and orig_url:
170
+ md = f"""---
171
+ license: other
172
+ language:
173
+ - en
174
+ library_name: diffusers
175
+ pipeline_tag: text-to-image
176
+ tags:
177
+ - text-to-image
178
+ ---
179
+ Converted from [{orig_name}]({orig_url}).
180
+ """
181
+ else:
182
+ md = f"""---
183
+ license: other
184
+ language:
185
+ - en
186
+ library_name: diffusers
187
+ pipeline_tag: text-to-image
188
+ tags:
189
+ - text-to-image
190
+ ---
191
+ """
192
+ path = str(Path(dir, "README.md"))
193
+ with open(path, mode='w', encoding="utf-8") as f:
194
+ f.write(md)
195
+
196
+
197
+ def fuse_loras(pipe, lora_dict={}, temp_dir=".", civitai_key=""):
198
+ if not lora_dict or not isinstance(lora_dict, dict): return
199
+ a_list = []
200
+ w_list = []
201
+ for k, v in lora_dict.items():
202
+ if not k: continue
203
+ new_lora_file = get_download_file(temp_dir, k, civitai_key)
204
+ if not new_lora_file or not Path(new_lora_file).exists():
205
+ print(f"LoRA not found: {k}")
206
+ continue
207
+ w_name = Path(new_lora_file).name
208
+ a_name = Path(new_lora_file).stem
209
+ pipe.load_lora_weights(new_lora_file, weight_name = w_name, adapter_name = a_name)
210
+ a_list.append(a_name)
211
+ w_list.append(v)
212
+ if not a_list: return
213
+ pipe.set_adapters(a_list, adapter_weights=w_list)
214
+ pipe.fuse_lora(adapter_names=a_list, lora_scale=1.0)
215
+ pipe.unload_lora_weights()
216
+
217
+
218
+ def convert_url_to_diffusers_sdxl(url, civitai_key="", is_upload_sf=False, half=True, vae=None, scheduler="Euler a", lora_dict={}, progress=gr.Progress(track_tqdm=True)):
219
+ progress(0, desc="Start converting...")
220
+ temp_dir = "."
221
+ new_file = get_download_file(temp_dir, url, civitai_key)
222
+ if not new_file:
223
+ print(f"Not found: {url}")
224
+ return ""
225
+ new_repo_name = Path(new_file).stem.replace(" ", "_").replace(",", "_").replace(".", "_")
226
+
227
+ pipe = None
228
+ if is_repo_name(url):
229
+ if half:
230
+ pipe = StableDiffusionXLPipeline.from_pretrained(new_file, use_safetensors=True, torch_dtype=torch.float16)
231
+ else:
232
+ pipe = StableDiffusionXLPipeline.from_pretrained(new_file, use_safetensors=True)
233
+ else:
234
+ if half:
235
+ pipe = StableDiffusionXLPipeline.from_single_file(new_file, use_safetensors=True, torch_dtype=torch.float16)
236
+ else:
237
+ pipe = StableDiffusionXLPipeline.from_single_file(new_file, use_safetensors=True)
238
+
239
+ new_vae_file = ""
240
+ if vae:
241
+ if is_repo_name(vae):
242
+ if half:
243
+ pipe.vae = AutoencoderKL.from_pretrained(vae, torch_dtype=torch.float16)
244
+ else:
245
+ pipe.vae = AutoencoderKL.from_pretrained(vae)
246
+ else:
247
+ new_vae_file = get_download_file(temp_dir, vae, civitai_key)
248
+ if new_vae_file and half:
249
+ pipe.vae = AutoencoderKL.from_single_file(new_vae_file, torch_dtype=torch.float16)
250
+ elif new_vae_file:
251
+ pipe.vae = AutoencoderKL.from_single_file(new_vae_file)
252
+
253
+ fuse_loras(pipe, lora_dict, temp_dir, civitai_key)
254
+
255
+ sconf = get_scheduler_config(scheduler)
256
+ pipe.scheduler = sconf[0].from_config(pipe.scheduler.config, **sconf[1])
257
+
258
+ pipe.save_pretrained(new_repo_name, safe_serialization=True, use_safetensors=True)
262
+
263
+ if Path(new_repo_name).exists():
264
+ save_readme_md(new_repo_name, url)
265
+
266
+ if not is_repo_name(new_file) and is_upload_sf:
267
+ import shutil
268
+ shutil.move(str(Path(new_file).resolve()), str(Path(new_repo_name, Path(new_file).name).resolve()))
269
+
270
+ progress(1, desc="Converted.")
271
+ return new_repo_name
272
+
273
+
274
+ def is_repo_exists(repo_id):
275
+ from huggingface_hub import HfApi
276
+ api = HfApi()
277
+ try:
278
+ if api.repo_exists(repo_id=repo_id): return True
279
+ else: return False
280
+ except Exception as e:
281
+ print(f"Error: Failed to connect {repo_id}. ")
282
+ return True # for safe
283
+
284
+
285
+ def create_diffusers_repo(new_repo_id, diffusers_folder, progress=gr.Progress(track_tqdm=True)):
286
+ from huggingface_hub import HfApi
287
+ import os
288
+ hf_token = os.environ.get("HF_TOKEN")
289
+ api = HfApi()
290
+ try:
291
+ progress(0, desc="Start uploading...")
292
+ api.create_repo(repo_id=new_repo_id, token=hf_token)
293
+ for path in Path(diffusers_folder).glob("*"):
294
+ if path.is_dir():
295
+ api.upload_folder(repo_id=new_repo_id, folder_path=str(path), path_in_repo=path.name, token=hf_token)
296
+ elif path.is_file():
297
+ api.upload_file(repo_id=new_repo_id, path_or_fileobj=str(path), path_in_repo=path.name, token=hf_token)
298
+ progress(1, desc="Uploaded.")
299
+ url = f"https://huggingface.co/{new_repo_id}"
300
+ except Exception as e:
301
+ print(f"Error: Failed to upload to {new_repo_id}. ")
302
+ print(e)
303
+ return ""
304
+ return url
305
+
306
+
307
+ def convert_url_to_diffusers_repo(dl_url, hf_user, hf_repo, hf_token, civitai_key="", is_upload_sf=False, repo_urls=[], half=True, vae=None,
308
+ scheduler="Euler a", lora1=None, lora1s=1.0, lora2=None, lora2s=1.0, lora3=None, lora3s=1.0,
309
+ lora4=None, lora4s=1.0, lora5=None, lora5s=1.0, progress=gr.Progress(track_tqdm=True)):
310
+ if not hf_user:
311
+ print(f"Invalid user name: {hf_user}")
312
+ progress(1, desc=f"Invalid user name: {hf_user}")
313
+ return gr.update(value=repo_urls, choices=repo_urls), gr.update(value="")
314
+ if hf_token and not os.environ.get("HF_TOKEN"): os.environ['HF_TOKEN'] = hf_token
315
+ if not civitai_key and os.environ.get("CIVITAI_API_KEY"): civitai_key = os.environ.get("CIVITAI_API_KEY")
316
+ lora_dict = {lora1: lora1s, lora2: lora2s, lora3: lora3s, lora4: lora4s, lora5: lora5s}
317
+ new_path = convert_url_to_diffusers_sdxl(dl_url, civitai_key, is_upload_sf, half, vae, scheduler, lora_dict)
318
+ if not new_path: return gr.update(value=repo_urls, choices=repo_urls), gr.update(value="")
319
+ new_repo_id = f"{hf_user}/{Path(new_path).stem}"
320
+ if hf_repo != "": new_repo_id = f"{hf_user}/{hf_repo}"
321
+ if not is_repo_name(new_repo_id):
322
+ print(f"Invalid repo name: {new_repo_id}")
323
+ progress(1, desc=f"Invalid repo name: {new_repo_id}")
324
+ return gr.update(value=repo_urls, choices=repo_urls), gr.update(value="")
325
+ if is_repo_exists(new_repo_id):
326
+ print(f"Repo already exists: {new_repo_id}")
327
+ progress(1, desc=f"Repo already exists: {new_repo_id}")
328
+ return gr.update(value=repo_urls, choices=repo_urls), gr.update(value="")
329
+ repo_url = create_diffusers_repo(new_repo_id, new_path)
330
+ if not repo_urls: repo_urls = []
331
+ repo_urls.append(repo_url)
332
+ md = "Your new repo:<br>"
333
+ for u in repo_urls:
334
+ md += f"[{str(u).split('/')[-2]}/{str(u).split('/')[-1]}]({str(u)})<br>"
335
+ return gr.update(value=repo_urls, choices=repo_urls), gr.update(value=md)
336
+
337
+
338
+ if __name__ == "__main__":
339
+ parser = argparse.ArgumentParser()
340
+
341
+ parser.add_argument("--url", default=None, type=str, required=True, help="URL of the model to convert.")
342
+ parser.add_argument("--half", default=True, help="Save weights in half precision.")
343
+ parser.add_argument("--scheduler", default="Euler a", type=str, choices=list(SCHEDULER_CONFIG_MAP.keys()), required=False, help="Scheduler name to use.")
344
+ parser.add_argument("--vae", default=None, type=str, required=False, help="URL of the VAE to use.")
345
+ parser.add_argument("--civitai_key", default=None, type=str, required=False, help="Civitai API Key (If you want to download file from Civitai).")
346
+ parser.add_argument("--lora1", default=None, type=str, required=False, help="URL of the LoRA to use.")
347
+ parser.add_argument("--lora1s", default=1.0, type=float, required=False, help="LoRA weight scale of --lora1.")
348
+ parser.add_argument("--lora2", default=None, type=str, required=False, help="URL of the LoRA to use.")
349
+ parser.add_argument("--lora2s", default=1.0, type=float, required=False, help="LoRA weight scale of --lora2.")
350
+ parser.add_argument("--lora3", default=None, type=str, required=False, help="URL of the LoRA to use.")
351
+ parser.add_argument("--lora3s", default=1.0, type=float, required=False, help="LoRA weight scale of --lora3.")
352
+ parser.add_argument("--lora4", default=None, type=str, required=False, help="URL of the LoRA to use.")
353
+ parser.add_argument("--lora4s", default=1.0, type=float, required=False, help="LoRA weight scale of --lora4.")
354
+ parser.add_argument("--lora5", default=None, type=str, required=False, help="URL of the LoRA to use.")
355
+ parser.add_argument("--lora5s", default=1.0, type=float, required=False, help="LoRA weight scale of --lora5.")
356
+ parser.add_argument("--loras", default=None, type=str, required=False, help="Folder of the LoRA to use.")
357
+
358
+ args = parser.parse_args()
359
+ assert args.url is not None, "Must provide a URL!"
360
+
361
+ lora_dict = {args.lora1: args.lora1s, args.lora2: args.lora2s, args.lora3: args.lora3s, args.lora4: args.lora4s, args.lora5: args.lora5s}
362
+
363
+ if args.loras and Path(args.loras).exists():
364
+ for p in Path(args.loras).glob('**/*.safetensors'):
365
+ lora_dict[str(p)] = 1.0
366
+
367
+ convert_url_to_diffusers_sdxl(args.url, civitai_key=args.civitai_key, half=args.half, vae=args.vae, scheduler=args.scheduler, lora_dict=lora_dict)
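The hf_merge.py script below accepts either a plain text file with one repo per line or a mergekit-style YAML. A minimal, hypothetical example of the YAML fields its repo_list_generator actually reads (the model names and numbers are placeholders):

import yaml

example_yaml = """\
models:
  - model: author/sdxl-model-a
    parameters:
      weight: 0.5    # used as this model's dropout probability p
      density: 0.5   # lambda is taken as 1 / density
  - model: author/sdxl-model-b  # falls back to the default p and lambda
"""

data = yaml.safe_load(example_yaml)
for model_info in data["models"]:
    print(model_info["model"], model_info.get("parameters", {}))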
hf_merge.py ADDED
@@ -0,0 +1,352 @@
1
+ from tqdm import tqdm
2
+ import argparse
3
+ import requests
4
+ import merge
5
+ import os
6
+ import sys
7
+ import shutil
8
+ import yaml
9
+ from pathlib import Path
10
+ import gradio as gr
11
+
12
+ def parse_arguments():
13
+ parser = argparse.ArgumentParser(description="Merge HuggingFace models")
14
+ parser.add_argument('repo_list', type=str, help='File containing list of repositories to merge, supports mergekit yaml or txt')
15
+ parser.add_argument('output_dir', type=str, help='Directory for the merged models')
16
+ parser.add_argument('-base_model', type=str, default='staging/base_model', help='Base model directory')
17
+ parser.add_argument('-staging_model', type=str, default='staging/merge_model', help='Staging model directory')
18
+ parser.add_argument('-p', type=float, default=0.5, help='Dropout probability')
19
+ parser.add_argument('-lambda', dest='lambda_val', type=float, default=1.0, help='Scaling factor for the weight delta')
20
+ parser.add_argument('--dry', action='store_true', help='Run in dry mode without making any changes')
21
+ return parser.parse_args()
22
+
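+ # Illustrative standalone invocation (file and directory names are placeholders):
+ #   python hf_merge.py repo_list.txt merged_output -p 0.13 -lambda 3.0
+ # The first entry of the repo list becomes the base model; the remaining entries are
+ # merged into it one at a time (see process_repos below).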
23
+ def repo_list_generator(file_path, default_p, default_lambda_val):
24
+ _, file_extension = os.path.splitext(file_path)
25
+
26
+ # Branching based on file extension
27
+ if file_extension.lower() == '.yaml' or file_extension.lower() == ".yml":
28
+ with open(file_path, 'r', encoding='utf-8') as file:
29
+ data = yaml.safe_load(file)
30
+ for model_info in data['models']:
31
+ model_name = model_info['model']
32
+ p = model_info.get('parameters', {}).get('weight', default_p)
33
+ lambda_val = 1 / model_info.get('parameters', {}).get('density', default_lambda_val)
34
+ yield model_name, p, lambda_val
35
+
36
+ else: # Defaulting to txt file processing
37
+ with open(file_path, "r", encoding='utf-8') as file:
38
+ repos_to_process = file.readlines()
39
+ for repo in repos_to_process:
40
+ yield repo.strip(), default_p, default_lambda_val
41
+
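+ # Example inputs this generator can read (contents are illustrative):
+ #   plain text, one repo/URL/local file per line:
+ #     author/model-a
+ #     author/model-b
+ #   mergekit-style YAML, where 'models' is a list:
+ #     models:
+ #       - model: author/model-a
+ #         parameters: {weight: 0.5, density: 0.5}
+ # In the YAML branch, 'weight' is used as p and the scaling factor is taken as 1 / 'density'.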
42
+ def reset_directories(directories, dry_run):
43
+ for directory in directories:
44
+ if os.path.exists(directory):
45
+ if dry_run:
46
+ print(f"[DRY RUN] Would delete directory {directory}")
47
+ else:
48
+ shutil.rmtree(directory)
49
+ print(f"Directory {directory} deleted successfully.")
50
+
51
+ def do_merge(tensor_map, staging_path, p, lambda_val, dry_run=False):
52
+ if dry_run:
53
+ print(f"[DRY RUN] Would merge with {staging_path}")
54
+ else:
55
+ try:
56
+ print(f"Merge operation for {staging_path}")
57
+ tensor_map = merge.merge_folder(tensor_map, staging_path, p, lambda_val)
58
+ print("Merge operation completed successfully.")
59
+ except Exception as e:
60
+ print(f"Error during merge operation: {e}")
61
+ return tensor_map
62
+
63
+ def do_merge_files(base_path, staging_path, output_path, p, lambda_val, dry_run=False):
64
+ if dry_run:
65
+ print(f"[DRY RUN] Would merge with {staging_path}")
66
+ else:
67
+ try:
68
+ print(f"Merge operation for {staging_path}")
69
+ tensor_map = merge.merge_files(base_path, staging_path, output_path, p, lambda_val)
70
+ print("Merge operation completed successfully.")
71
+ except Exception as e:
72
+ print(f"Error during merge operation: {e}")
73
+ return tensor_map
74
+
75
+ def do_merge_diffusers(tensor_map, staging_path, p, lambda_val, skip_dirs, dry_run=False):
76
+ if dry_run:
77
+ print(f"[DRY RUN] Would merge with {staging_path}")
78
+ else:
79
+ try:
80
+ print(f"Merge operation for {staging_path}")
81
+ tensor_map = merge.merge_folder_diffusers(tensor_map, staging_path, p, lambda_val, skip_dirs)
82
+ print("Merge operation completed successfully.")
83
+ except Exception as e:
84
+ print(f"Error during merge operation: {e}")
85
+ return tensor_map
86
+
87
+ def download_repo(repo_name, path, dry_run=False):
88
+ from huggingface_hub import snapshot_download
89
+ if dry_run:
90
+ print(f"[DRY RUN] Would download repository {repo_name} to {path}")
91
+ else:
92
+ print(f"Cloning repository {repo_name}.")
93
+ try:
94
+ snapshot_download(repo_id=repo_name, local_dir=path)
95
+ except Exception as e:
96
+ print(e)
97
+ return
98
+ print(f"Repository {repo_name} cloned successfully.")
99
+
100
+ def download_thing(directory, url, progress=gr.Progress(track_tqdm=True)):
101
+ civitai_api_key= os.environ.get("CIVITAI_API_KEY")
102
+ url = url.strip()
103
+ if "drive.google.com" in url:
104
+ original_dir = os.getcwd()
105
+ os.chdir(directory)
106
+ os.system(f"gdown --fuzzy {url}")
107
+ os.chdir(original_dir)
108
+ elif "huggingface.co" in url:
109
+ url = url.replace("?download=true", "")
110
+ if "/blob/" in url:
111
+ url = url.replace("/blob/", "/resolve/")
112
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
113
+ else:
114
+ os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
115
+ elif "civitai.com" in url:
116
+ if "?" in url:
117
+ url = url.split("?")[0]
118
+ if civitai_api_key:
119
+ url = url + f"?token={civitai_api_key}"
120
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
121
+ else:
122
+ print("You need an API key to download Civitai models.")
123
+ else:
124
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
125
+
126
+ def get_local_model_list(dir_path):
127
+ model_list = []
128
+ valid_extensions = ('.safetensors',)  # one-element tuple, not a bare string
129
+ for file in Path(dir_path).glob("*"):
130
+ if file.suffix in valid_extensions:
131
+ file_path = str(Path(f"{dir_path}/{file.name}"))
132
+ model_list.append(file_path)
133
+ return model_list
134
+
135
+ def list_sub(a, b):
136
+ return [e for e in a if e not in b]
137
+
138
+ def get_download_file(temp_dir, url):
139
+ new_file = None
140
+ if "http" not in url and Path(url).exists():
141
+ print(f"Use local file: {url}")
142
+ new_file = url
143
+ elif Path(f"{temp_dir}/{url.split('/')[-1]}").exists():
144
+ print(f"File to download already exists: {url}")
145
+ new_file = f"{temp_dir}/{url.split('/')[-1]}"
146
+ else:
147
+ print(f"Start downloading: {url}")
148
+ before = get_local_model_list(temp_dir)
149
+ try:
150
+ download_thing(temp_dir, url.strip())
151
+ except Exception:
152
+ print(f"Download failed: {url}")
153
+ return None
154
+ after = get_local_model_list(temp_dir)
155
+ new_file = list_sub(after, before)[0] if list_sub(after, before) else None
156
+ if new_file is None:
157
+ print(f"Download failed: {url}")
158
+ return None
159
+ print(f"Download completed: {url}")
160
+ return new_file
161
+
162
+ def download_file(url, path, dry_run=False):
163
+ if dry_run:
164
+ print(f"[DRY RUN] Would download file {url} to {path}")
165
+ else:
166
+ print(f"Downloading file {url}.")
167
+ try:
168
+ path = get_download_file(path, url)
169
+ except Exception as e:
170
+ print(e)
171
+ return None
172
+ print(f"File {url} downloaded successfully.")
173
+ return path
174
+
175
+ def is_repo_name(s):
176
+ import re
177
+ return re.fullmatch(r'^[^/,\s]+?/[^/,\s]+?$', s)
178
+
179
+ def should_create_symlink(repo_name):
180
+ if os.path.exists(repo_name):
181
+ return True, os.path.isfile(repo_name)
182
+ return False, False
183
+
184
+ def download_or_link_repo(repo_name, path, dry_run=False):
185
+ symlink, is_file = should_create_symlink(repo_name)
186
+
187
+ if symlink and is_file:
188
+ os.makedirs(path, exist_ok=True)
189
+ symlink_path = os.path.join(path, os.path.basename(repo_name))
190
+ os.symlink(repo_name, symlink_path)
191
+ elif symlink:
192
+ os.symlink(repo_name, path)
193
+ elif "http" in repo_name:
194
+ return download_file(repo_name, path, dry_run)
195
+ elif is_repo_name(repo_name):
196
+ download_repo(repo_name, path, dry_run)
197
+ return None
198
+
199
+ def delete_repo(path, dry_run=False):
200
+ if dry_run:
201
+ print(f"[DRY RUN] Would delete repository at {path}")
202
+ else:
203
+ try:
204
+ shutil.rmtree(path)
205
+ print(f"Repository at {path} deleted successfully.")
206
+ except Exception as e:
207
+ print(f"Error deleting repository at {path}: {e}")
208
+
209
+ def get_max_vocab_size(repo_list):
210
+ max_vocab_size = 0
211
+ repo_with_max_vocab = None
212
+
213
+ for repo in repo_list:
214
+ repo_name = repo[0].strip()
215
+ url = f"https://huggingface.co/{repo_name}/raw/main/config.json"
216
+
217
+ try:
218
+ response = requests.get(url)
219
+ response.raise_for_status()
220
+ config = response.json()
221
+ vocab_size = config.get("vocab_size", 0)
222
+
223
+ if vocab_size > max_vocab_size:
224
+ max_vocab_size = vocab_size
225
+ repo_with_max_vocab = repo_name
226
+
227
+ except requests.RequestException as e:
228
+ print(f"Error fetching data from {url}: {e}")
229
+
230
+ return max_vocab_size, repo_with_max_vocab
231
+
232
+ def download_json_files(repo_name, file_paths, output_dir):
233
+ base_url = f"https://huggingface.co/{repo_name}/raw/main/"
234
+
235
+ for file_path in file_paths:
236
+ url = base_url + file_path
237
+ response = requests.get(url)
238
+ if response.status_code == 200:
239
+ with open(os.path.join(output_dir, os.path.basename(file_path)), 'wb') as file:
240
+ file.write(response.content)
241
+ else:
242
+ print(f"Failed to download {file_path}")
243
+
244
+ def get_merged_path(filename, output_dir):
245
+ from datetime import datetime, timezone, timedelta
246
+ dt_now = datetime.now(timezone(timedelta(hours=9)))
247
+ basename = dt_now.strftime('Merged_%Y%m%d_%H%M')
248
+ ext = Path(filename).suffix
249
+ return str(Path(output_dir, basename + ext)), str(Path(output_dir, basename + ".yaml"))
250
+
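+ # e.g. get_merged_path("model.safetensors", "output") returns something like
+ # ("output/Merged_20240801_1234.safetensors", "output/Merged_20240801_1234.yaml");
+ # the basename is a timestamp in JST (UTC+9), so the exact value depends on the current time.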
251
+ def repo_list_to_yaml(repo_list_path, repo_list, output_yaml_path):
252
+ if Path(repo_list_path).suffix.lower() in (".yaml", ".yml"):
253
+ shutil.copy(repo_list_path, output_yaml_path)
254
+ else:
255
+ repos = list(repo_list)
256
+ yaml_dict = {}
257
+ yaml_dict.setdefault('models', {})
258
+ for repo in repos:
259
+ model, weight, density = repo
260
+ model_info = {}
261
+ model_info['model'] = str(model)
262
+ model_info.setdefault('parameters', {})
263
+ model_info['parameters']['weight'] = float(weight)
264
+ model_info['parameters']['density'] = float(density)
265
+ yaml_dict['models'][str(model.split("/")[-1])] = model_info
266
+ with open(output_yaml_path, mode='w', encoding='utf-8') as file:
267
+ yaml.dump(yaml_dict, file, default_flow_style=False, allow_unicode=True)
268
+
269
+ def process_repos(output_dir, base_model, staging_model, repo_list_file, p, lambda_val, skip_dirs, dry_run=False, progress=gr.Progress(track_tqdm=True)):
270
+ repo_type = "Default" # ("Default", "Files", "Diffusers")
271
+ # Check if output_dir exists
272
+ if os.path.exists(output_dir):
273
+ sys.exit(f"Output directory '{output_dir}' already exists. Exiting to prevent data loss.")
274
+
275
+ # Reset base and staging directories
276
+ reset_directories([base_model, staging_model], dry_run)
277
+
278
+ # Make sure staging and output directories exist
279
+ os.makedirs(base_model, exist_ok=True)
280
+ os.makedirs(staging_model, exist_ok=True)
281
+
282
+ repo_list_gen = repo_list_generator(repo_list_file, p, lambda_val)
283
+
284
+ repos_to_process = list(repo_list_gen)
285
+
286
+ # Initial download for 'base_model'
287
+ path = download_or_link_repo(repos_to_process[0][0].strip(), base_model, dry_run)
288
+ if path is not None and (".safetensors" in path or ".sft" in path): repo_type = "Files"
289
+ elif Path(base_model, "model_index.json").exists(): repo_type = "Diffusers"
290
+ if repo_type == "Files":
291
+ os.makedirs(output_dir, exist_ok=True)
292
+ output_file_path, output_yaml_path = get_merged_path(path, output_dir)
293
+ repo_list_to_yaml(repo_list_file, repos_to_process, output_yaml_path)  # repo_list_gen is already exhausted by list() above
294
+ for i, repo in enumerate(tqdm(repos_to_process[1:], desc='Merging Files')):
295
+ repo_name = repo[0].strip()
296
+ repo_p = repo[1]
297
+ repo_lambda = repo[2]
298
+ delete_repo(staging_model, dry_run)
299
+ staging_path = download_or_link_repo(repo_name, staging_model, dry_run)
300
+ do_merge_files(path, staging_path, output_file_path, repo_p, repo_lambda, dry_run)
301
+ reset_directories([base_model, staging_model], dry_run)
302
+ return output_file_path, output_yaml_path
303
+ elif repo_type == "Diffusers":
304
+ merge.copy_dirs(base_model, output_dir)
305
+ tensor_map = merge.map_tensors_to_files_diffusers(base_model, skip_dirs)
306
+
307
+ for i, repo in enumerate(tqdm(repos_to_process[1:], desc='Merging Repos')):
308
+ repo_name = repo[0].strip()
309
+ repo_p = repo[1]
310
+ repo_lambda = repo[2]
311
+ delete_repo(staging_model, dry_run)
312
+ download_or_link_repo(repo_name, staging_model, dry_run)
313
+ tensor_map = do_merge_diffusers(tensor_map, staging_model, repo_p, repo_lambda, skip_dirs, dry_run)
314
+
315
+ os.makedirs(output_dir, exist_ok=True)
316
+ merge.copy_skipped_dirs(base_model, output_dir, skip_dirs)
317
+ merge.copy_nontensor_files(base_model, output_dir)
318
+ merge.save_tensor_map(tensor_map, output_dir)
319
+
320
+ reset_directories([base_model, staging_model], dry_run)
321
+ return None, None
322
+ elif repo_type == "Default":
323
+ merge.copy_dirs(base_model, output_dir)
324
+ tensor_map = merge.map_tensors_to_files(base_model)
325
+
326
+ for i, repo in enumerate(tqdm(repos_to_process[1:], desc='Merging Repos')):
327
+ repo_name = repo[0].strip()
328
+ repo_p = repo[1]
329
+ repo_lambda = repo[2]
330
+ delete_repo(staging_model, dry_run)
331
+ download_or_link_repo(repo_name, staging_model, dry_run)
332
+ tensor_map = do_merge(tensor_map, staging_model, repo_p, repo_lambda, dry_run)
333
+
334
+ os.makedirs(output_dir, exist_ok=True)
335
+ merge.copy_nontensor_files(base_model, output_dir)
336
+
337
+ # Handle LLMs that add tokens by taking the largest
338
+ if os.path.exists(os.path.join(output_dir, 'config.json')):
339
+ max_vocab_size, repo_name = get_max_vocab_size(repos_to_process)
340
+ if max_vocab_size > 0:
341
+ file_paths = ['config.json', 'special_tokens_map.json', 'tokenizer.json', 'tokenizer_config.json']
342
+ download_json_files(repo_name, file_paths, output_dir)
343
+
344
+ reset_directories([base_model, staging_model], dry_run)
345
+ merge.save_tensor_map(tensor_map, output_dir)
346
+ return None, None
347
+
348
+ if __name__ == "__main__":
349
+ args = parse_arguments()
350
+ skip_dirs = ['vae', 'text_encoder']
351
+ process_repos(args.output_dir, args.base_model, args.staging_model, args.repo_list, args.p, args.lambda_val, skip_dirs, args.dry)
352
+
merge.py ADDED
@@ -0,0 +1,257 @@
1
+ import argparse
2
+ import numpy as np
3
+ import os
4
+ import shutil
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from safetensors.torch import safe_open, save_file
8
+ import glob
9
+ from pathlib import Path
10
+
11
+ def merge_tensors(tensor1, tensor2, p):
12
+ # Calculate the delta of the weights
13
+ delta = tensor2 - tensor1
14
+ # Generate the mask m^t from Bernoulli distribution
15
+ m = torch.from_numpy(np.random.binomial(1, p, delta.shape)).to(tensor1.dtype)
16
+ # Apply the mask to the delta to get δ̃^t
17
+ delta_tilde = m * delta
18
+ # Scale the masked delta by the dropout rate to get δ̂^t
19
+ delta_hat = delta_tilde / (1 - p)
20
+ return delta_hat
21
+
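+ # Note: np.random.binomial(1, p, ...) keeps each element of the delta with probability p,
+ # and the kept values are rescaled by 1 / (1 - p). Callers below then form the merged weight
+ # as tensor1 + lambda_val * merge_tensors(tensor1, tensor2, p) (see merge_safetensors / merge_folder).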
22
+ def merge_safetensors(file_path1, file_path2, p, lambda_val):
23
+ merged_tensors = {}
24
+
25
+ with safe_open(file_path1, framework="pt", device="cpu") as f1, safe_open(file_path2, framework="pt", device="cpu") as f2:
26
+ keys1 = set(f1.keys())
27
+ keys2 = set(f2.keys())
28
+ common_keys = keys1.intersection(keys2)
29
+
30
+ for key in common_keys:
31
+ tensor1 = f1.get_tensor(key)
32
+ tensor2 = f2.get_tensor(key)
33
+ tensor1, tensor2 = resize_tensors(tensor1, tensor2)
34
+ merged_tensors[key] = tensor1 + lambda_val * merge_tensors(tensor1, tensor2, p)
35
+ print("merging", key)
36
+
37
+ return merged_tensors
38
+
39
+ class BinDataHandler():
40
+ def __init__(self, data):
41
+ self.data = data
42
+
43
+ def get_tensor(self, key):
44
+ return self.data[key]
45
+
46
+ def read_tensors(file_path, ext):
47
+ if ext == ".safetensors" and (file_path.endswith(".safetensors") or file_path.endswith(".sft")):
48
+ print(f"Reading tensors from {file_path} in {ext} format.")
49
+ f = safe_open(file_path, framework="pt", device="cpu")
50
+ return f, set(f.keys())
51
+ if ext == ".bin" and file_path.endswith(".bin"):
52
+ print(f"Reading tensors from {file_path} in {ext} format.")
53
+ data = torch.load(file_path, map_location=torch.device('cpu'))
54
+ f = BinDataHandler(data)
55
+ return f, set(data.keys())
56
+ return None, None
57
+
58
+ def resize_tensors(tensor1, tensor2):
59
+ if len(tensor1.shape) not in [1, 2]:
60
+ return tensor1, tensor2
61
+
62
+ if len(tensor1.shape) == 1 and len(tensor2.shape) == 1:
63
+ if tensor1.shape[-1] < tensor2.shape[-1]:
64
+ padding_size = tensor2.shape[-1] - tensor1.shape[-1]
65
+ pad = torch.nn.ConstantPad1d((padding_size, 0), 0)
66
+ tensor1 = pad(tensor1)
67
+ elif tensor2.shape[-1] < tensor1.shape[-1]:
68
+ padding_size = tensor1.shape[-1] - tensor2.shape[-1]
69
+ pad = torch.nn.ConstantPad1d((padding_size, 0), 0)
70
+ tensor2 = pad(tensor2)
71
+ else:
72
+ # Pad along the last dimension (width)
73
+ if tensor1.shape[-1] < tensor2.shape[-1]:
74
+ padding_size = tensor2.shape[-1] - tensor1.shape[-1]
75
+ tensor1 = F.pad(tensor1, (0, padding_size, 0, 0))
76
+ elif tensor2.shape[-1] < tensor1.shape[-1]:
77
+ padding_size = tensor1.shape[-1] - tensor2.shape[-1]
78
+ tensor2 = F.pad(tensor2, (0, padding_size, 0, 0))
79
+
80
+ # Pad along the first dimension (height)
81
+ if tensor1.shape[0] < tensor2.shape[0]:
82
+ padding_size = tensor2.shape[0] - tensor1.shape[0]
83
+ tensor1 = F.pad(tensor1, (0, 0, 0, padding_size))
84
+ elif tensor2.shape[0] < tensor1.shape[0]:
85
+ padding_size = tensor1.shape[0] - tensor2.shape[0]
86
+ tensor2 = F.pad(tensor2, (0, 0, 0, padding_size))
87
+
88
+ return tensor1, tensor2
89
+
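+ # e.g. when two checkpoints disagree on vocabulary size, the smaller 1D/2D tensor is
+ # zero-padded here up to the larger shape so the element-wise delta above stays defined.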
90
+ def merge_folder(tensor_map, directory_path, p, lambda_val):
91
+ keys1 = set(tensor_map.keys())
92
+ # Some repos have both bin and safetensors, choose safetensors if so
93
+ ext = None
94
+ for filename in glob.glob(f'{directory_path}/**', recursive=True):
95
+ filename = os.path.normpath(filename)
96
+ # Default to safetensors
97
+ if filename.endswith(".safetensors") or filename.endswith(".sft"):
98
+ ext = ".safetensors"
99
+ if filename.endswith(".bin") and ext is None:
100
+ ext = ".bin"
101
+ if ext is None:
102
+ raise RuntimeError("Could not find model files")
103
+
104
+ for filename in glob.glob(f'{directory_path}/**', recursive=True):
105
+ filename = os.path.normpath(filename)
106
+ f2, keys2 = read_tensors(filename, ext)
107
+ if keys2:
108
+ common_keys = keys1.intersection(keys2)
109
+ for key in common_keys:
110
+ if "block_sparse_moe.gate" in key:
111
+ tensor1 = tensor_map[key]['tensor']
112
+ tensor2 = f2.get_tensor(key)
113
+ tensor_map[key]['tensor'] = (tensor1 + tensor2) /2.0
114
+ print("merging", key)
115
+ continue
116
+ tensor1 = tensor_map[key]['tensor']
117
+ tensor2 = f2.get_tensor(key)
118
+ tensor1, tensor2 = resize_tensors(tensor1, tensor2)
119
+ tensor_map[key]['tensor'] = tensor1 + lambda_val * merge_tensors(tensor1, tensor2, p)
120
+ print("merging", key)
121
+ return tensor_map
122
+
123
+ def merge_folder_diffusers(tensor_map, directory_path, p, lambda_val, skip_dirs):
124
+ keys1 = set(tensor_map.keys())
125
+ # Some repos have both bin and safetensors, choose safetensors if so
126
+ ext = None
127
+ for filename in [p for p in glob.glob(f'{directory_path}/*', recursive=False) if ".fp16." not in p]:
128
+ filename = os.path.normpath(filename)
129
+ # Default to safetensors
130
+ if filename.endswith(".safetensors") or filename.endswith(".sft"):
131
+ ext = ".safetensors"
132
+ if filename.endswith(".bin") and ext is None:
133
+ ext = ".bin"
134
+ if ext is None:
135
+ raise RuntimeError("Could not find model files")
136
+
137
+ for dirname in glob.glob(f'{directory_path}/*/', recursive=False):
138
+ if Path(dirname).stem in skip_dirs: continue
139
+ for filename in [p for p in glob.glob(f'{dirname}/*', recursive=False) if ".fp16." not in p]:
140
+ filename = os.path.normpath(filename)
141
+ f2, keys2 = read_tensors(filename, ext)
142
+ if keys2:
143
+ common_keys = keys1.intersection(keys2)
144
+ for key in common_keys:
145
+ if "block_sparse_moe.gate" in key:
146
+ tensor1 = tensor_map[key]['tensor']
147
+ tensor2 = f2.get_tensor(key)
148
+ tensor_map[key]['tensor'] = (tensor1 + tensor2) /2.0
149
+ print("merging", key)
150
+ continue
151
+ tensor1 = tensor_map[key]['tensor']
152
+ tensor2 = f2.get_tensor(key)
153
+ tensor1, tensor2 = resize_tensors(tensor1, tensor2)
154
+ tensor_map[key]['tensor'] = tensor1 + lambda_val * merge_tensors(tensor1, tensor2, p)
155
+ print("merging", key)
156
+ return tensor_map
157
+
158
+ def merge_files(base_model, second_model, output_model, p, lambda_val):
159
+ merged = merge_safetensors(base_model, second_model, p, lambda_val)
160
+ save_file(merged, output_model)
161
+
162
+ def map_tensors_to_files(directory_path):
163
+ tensor_map = {}
164
+
165
+ for filename in glob.glob(f'{directory_path}/**', recursive=True):
166
+ filename = os.path.normpath(filename)
167
+ f, keys = read_tensors(filename, '.safetensors')
168
+ if keys:
169
+ for key in keys:
170
+ tensor = f.get_tensor(key)
171
+ tensor_map[key] = {'filename':filename, 'shape':tensor.shape, 'tensor': tensor}
172
+
173
+ return tensor_map
174
+
175
+ def map_tensors_to_files_diffusers(directory_path, skip_dirs):
176
+ tensor_map = {}
177
+
178
+ for dirname in glob.glob(f'{directory_path}/*/', recursive=False):
179
+ if Path(dirname).stem in skip_dirs: continue
180
+ for filename in [p for p in glob.glob(f'{dirname}/*', recursive=False) if ".fp16." not in p]:
181
+ filename = os.path.normpath(filename)
182
+ f, keys = read_tensors(filename, '.safetensors')
183
+ if keys:
184
+ for key in keys:
185
+ tensor = f.get_tensor(key)
186
+ tensor_map[key] = {'filename':filename, 'shape':tensor.shape, 'tensor': tensor}
187
+
188
+ return tensor_map
189
+
190
+ def copy_nontensor_files(from_path, to_path):
191
+ print(f"Copying non-tensor files {from_path} to {to_path}")
192
+ shutil.copytree(from_path, to_path, ignore=shutil.ignore_patterns("*.safetensors", "*.bin", "*.sft", ".*", "README*"), dirs_exist_ok=True)
193
+
194
+ def copy_skipped_dirs(from_path, to_path, skip_dirs):
195
+ for dirname in glob.glob(f'{from_path}/*/', recursive=False):
196
+ if Path(dirname).stem in skip_dirs:
197
+ dirname = os.path.normpath(dirname)
198
+ print(f"Copying skipped files {dirname} to {to_path}")
199
+ shutil.copytree(Path(dirname).resolve(), Path(to_path, Path(dirname).stem).resolve(), ignore=shutil.ignore_patterns(".*", "README*"), dirs_exist_ok=True)
200
+
201
+ def save_tensor_map(tensor_map, output_folder):
202
+ metadata = {'format': 'pt'}
203
+ by_filename = {}
204
+
205
+ for key, value in tensor_map.items():
206
+ filename = value["filename"]
207
+ tensor = value["tensor"]
208
+ filename = os.path.normpath(filename)
209
+ if filename not in by_filename:
210
+ by_filename[filename] = {}
211
+ by_filename[filename][key] = tensor
212
+
213
+ for filename in sorted(by_filename.keys()):
214
+ filename = os.path.normpath(filename)
215
+ if Path(output_folder, Path(filename).parent.name).exists():
216
+ output_file = str(Path(output_folder, Path(filename).parent.name, Path(filename).name))
217
+ else:
218
+ output_file = str(Path(output_folder, Path(filename).name))
219
+ print("Saving:", output_file)
220
+ save_file(by_filename[filename], output_file, metadata=metadata)
221
+
222
+ def copy_dirs(src: str, dst: str):
223
+ shutil.copytree(src, dst, ignore=shutil.ignore_patterns("*.*"), dirs_exist_ok=True)
224
+
225
+ def main():
226
+ # Parse command-line arguments
227
+ parser = argparse.ArgumentParser(description='Merge two safetensor model files.')
228
+ parser.add_argument('base_model', type=str, help='The base model safetensor file')
229
+ parser.add_argument('second_model', type=str, help='The second model safetensor file')
230
+ parser.add_argument('output_model', type=str, help='The output merged model safetensor file')
231
+ parser.add_argument('-p', type=float, default=0.5, help='Dropout probability')
232
+ parser.add_argument('-lambda', dest='lambda_val', type=float, default=1.0, help='Scaling factor for the weight delta')
233
+ args = parser.parse_args()
234
+
235
+ skip_dirs = ['vae', 'text_encoder']
236
+ if os.path.isdir(args.base_model):
237
+ if not os.path.exists(args.output_model):
238
+ os.makedirs(args.output_model)
239
+ if os.path.exists(args.base_model + "/model_index.json"): # assume Diffusers Repo
240
+ copy_dirs(args.base_model, args.output_model)
241
+ tensor_map = map_tensors_to_files_diffusers(args.base_model, skip_dirs)
242
+ tensor_map = merge_folder_diffusers(tensor_map, args.second_model, args.p, args.lambda_val, skip_dirs)
243
+ copy_skipped_dirs(args.base_model, args.output_model, skip_dirs)
244
+ copy_nontensor_files(args.base_model, args.output_model)
245
+ save_tensor_map(tensor_map, args.output_model)
246
+ else:
247
+ copy_dirs(args.base_model, args.output_model)
248
+ tensor_map = map_tensors_to_files(args.base_model)
249
+ tensor_map = merge_folder(tensor_map, args.second_model, args.p, args.lambda_val)
250
+ copy_nontensor_files(args.base_model, args.output_model)
251
+ save_tensor_map(tensor_map, args.output_model)
252
+ else:
253
+ merged = merge_safetensors(args.base_model, args.second_model, args.p, args.lambda_val)
254
+ save_file(merged, args.output_model)
255
+
256
+ if __name__ == '__main__':
257
+ main()
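+ # Illustrative invocations (paths are placeholders):
+ #   python merge.py base.safetensors second.safetensors merged.safetensors -p 0.13 -lambda 3.0
+ #   python merge.py base_diffusers_dir second_diffusers_dir merged_dir    # folder / Diffusers-repo mode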
merge_gr.py ADDED
@@ -0,0 +1,249 @@
1
+ from pathlib import Path
2
+ import os
3
+ import shutil
4
+ import yaml
5
+ import gradio as gr
6
+ from hf_merge import process_repos, repo_list_generator
7
+
8
+ def list_sub(a, b):
9
+ return [e for e in a if e not in b]
10
+
11
+ def is_repo_name(s):
12
+ import re
13
+ return re.fullmatch(r'^[^/,\s]+?/[^/,\s]+?$', s)
14
+
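+ # e.g. is_repo_name("author/model") matches, while "model.safetensors" (no slash) or a full
+ # URL such as "https://huggingface.co/author/model" (more than one slash) does not.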
15
+ def is_valid_model_name(s):
16
+ if is_repo_name(s) or Path(s).suffix in (".safetensors", ".bin", ".sft"): return True
17
+ else: return False
18
+
19
+ def is_repo_exists(repo_id):
20
+ from huggingface_hub import HfApi
21
+ api = HfApi()
22
+ try:
23
+ if api.repo_exists(repo_id=repo_id): return True
24
+ else: return False
25
+ except Exception as e:
26
+ print(f"Error: Failed to connect to {repo_id}.")
27
+ return True # for safe
28
+
29
+ def create_repo(new_repo_id):
30
+ from huggingface_hub import HfApi
31
+ import os
32
+ hf_token = os.environ.get("HF_TOKEN")
33
+ api = HfApi()
34
+ try:
35
+ api.create_repo(repo_id=new_repo_id, token=hf_token, private=True)
36
+ url = f"https://huggingface.co/{new_repo_id}"
37
+ except Exception as e:
38
+ print(f"Error: Failed to create {new_repo_id}. ")
39
+ print(e)
40
+ return ""
41
+ return url
42
+
43
+ def upload_dir_to_repo(new_repo_id, folder, progress=gr.Progress(track_tqdm=True)):
44
+ from huggingface_hub import HfApi
45
+ import os
46
+ hf_token = os.environ.get("HF_TOKEN")
47
+ api = HfApi()
48
+ try:
49
+ progress(0, desc="Start uploading...")
50
+ for path in Path(folder).glob("*"):
51
+ if path.is_dir():
52
+ api.upload_folder(repo_id=new_repo_id, folder_path=str(path), path_in_repo=path.name, token=hf_token)
53
+ elif path.is_file():
54
+ api.upload_file(repo_id=new_repo_id, path_or_fileobj=str(path), path_in_repo=path.name, token=hf_token)
55
+ progress(1, desc="Uploaded.")
56
+ url = f"https://huggingface.co/{new_repo_id}"
57
+ except Exception as e:
58
+ print(f"Error: Failed to upload to {new_repo_id}. ")
59
+ print(e)
60
+ return ""
61
+ return url
62
+
63
+ merge_yaml_path = "./merge_yaml.yaml"
64
+ merge_text_path = "./merge_txt.txt"
65
+
66
+ def load_yaml_dict(yaml_path: str):
67
+ yaml_dict = None
68
+ try:
69
+ data = None
70
+ with open(yaml_path, 'r', encoding='utf-8') as file:
71
+ data = yaml.safe_load(file)
72
+ except Exception as e:
73
+ print(e)
74
+ data = None
75
+ if isinstance(data, dict) and 'models' in data.keys() and data['models']:
76
+ yaml_dict = data
77
+ return yaml_dict
78
+
79
+ def repo_text_to_yaml_dict(text_path: str, default_p: float, default_lambda_val: float):
80
+ yaml_dict = {}
81
+ repos = list(repo_list_generator(text_path, default_p, default_lambda_val))
82
+ yaml_dict.setdefault('models', {})
83
+ for repo in repos:
84
+ model, weight, density = repo
85
+ if not is_valid_model_name(model): continue
86
+ model_info = {}
87
+ model_info['model'] = str(model)
88
+ model_info.setdefault('parameters', {})
89
+ model_info['parameters']['weight'] = float(weight)
90
+ model_info['parameters']['density'] = float(density)
91
+ yaml_dict['models'][str(model.split("/")[-1])] = model_info
92
+ return yaml_dict
93
+
94
+ def gen_repo_list(input_text: str, default_p: float, default_lambda_val: float):
95
+ yaml_dict = {}
96
+ if Path(merge_yaml_path).exists():
97
+ yaml_dict = load_yaml_dict(merge_yaml_path)
98
+ else:
99
+ with open(merge_text_path, mode='w', encoding='utf-8') as file:
100
+ file.write(input_text)
101
+ yaml_dict = repo_text_to_yaml_dict(merge_text_path, default_p, default_lambda_val)
102
+ yaml_str = yaml.dump(yaml_dict, allow_unicode=True)
103
+ md = f"""``` yaml
104
+ {yaml_str}
105
+ ```"""
106
+ return md
107
+
108
+ def upload_repo_list(filepath: str, default_p: float, default_lambda_val: float):
109
+ yaml_dict = {}
110
+ if Path(filepath).suffix in [".yml", ".yaml"]:
111
+ yaml_dict = load_yaml_dict(filepath)
112
+ if yaml_dict is not None:
113
+ with open(merge_yaml_path, mode='w', encoding='utf-8') as file:
114
+ yaml.dump(yaml_dict, file, default_flow_style=False, allow_unicode=True)
115
+ else:
116
+ yaml_dict = repo_text_to_yaml_dict(filepath, default_p, default_lambda_val)
117
+ shutil.copy(filepath, merge_text_path)
118
+ yaml_str = yaml.dump(yaml_dict, allow_unicode=True)
119
+ md = f"""``` yaml
120
+ {yaml_str}
121
+ ```"""
122
+ return md
123
+
124
+ def clear_repo_list():
125
+ Path(merge_text_path).unlink(missing_ok=True)
126
+ Path(merge_yaml_path).unlink(missing_ok=True)
127
+ return gr.update(value=""), gr.update(value="")
128
+
129
+ def clear_output(output_dir: str):
130
+ shutil.rmtree(output_dir, ignore_errors=True)
131
+ print(f"Directory {output_dir} deleted successfully.")
132
+
133
+ def process_repos_gr(mode, p, lambda_val, skip_dirs: list[str], hf_user: str, hf_repo: str, hf_token: str,
134
+ is_upload=True, is_upload_sf=False, repo_exist_ok=False, files=[], repo_urls=[], progress=gr.Progress(track_tqdm=True)):
135
+ if is_upload and not hf_user:
136
+ print(f"Invalid user name: {hf_user}")
137
+ progress(1, desc=f"Invalid user name: {hf_user}")
138
+ return gr.update(value=files), gr.update(value=repo_urls, choices=repo_urls), gr.update(visible=True)
139
+ if hf_token and not os.environ.get("HF_TOKEN"): os.environ['HF_TOKEN'] = hf_token
140
+ output_dir = "output"
141
+ base_model = "base_model"
142
+ staging_model = "staging_model"
143
+ output_model = str(Path(output_dir, base_model))
144
+ output_model = output_dir
145
+ repo_list_file = None
146
+ if is_upload:
147
+ clear_output(output_dir)
148
+ files = []
149
+ if Path(merge_yaml_path).exists(): repo_list_file = merge_yaml_path
150
+ elif Path(merge_text_path).exists(): repo_list_file = merge_text_path
151
+ if repo_list_file is None:
152
+ print("Repo list is not found.")
153
+ progress(1, desc="Repo list is not found.")
154
+ return gr.update(value=files), gr.update(value=repo_urls, choices=repo_urls), gr.update(visible=True)
155
+ new_repo_id = f"{hf_user}/{hf_repo}"
156
+ if is_upload and not is_repo_name(new_repo_id):
157
+ print(f"Invalid repo name: {new_repo_id}")
158
+ progress(1, desc=f"Invalid repo name: {new_repo_id}")
159
+ return gr.update(value=files), gr.update(value=repo_urls, choices=repo_urls), gr.update(visible=True)
160
+ if is_upload and is_repo_exists(new_repo_id):
161
+ print(f"Repo already exists: {new_repo_id}")
162
+ if not repo_exist_ok:
163
+ progress(1, desc=f"Repo already exists: {new_repo_id}")
164
+ return gr.update(value=files), gr.update(value=repo_urls, choices=repo_urls), gr.update(visible=True)
165
+ try:
166
+ progress(0, desc="Downloading repos.")
167
+ if mode == "SDXL":
168
+ output_file_path, output_yaml_path = process_repos(output_dir, base_model, staging_model,
169
+ repo_list_file, p, lambda_val, skip_dirs + ["text_encoder"], False)
170
+ else:
171
+ output_file_path, output_yaml_path = process_repos(output_dir, base_model, staging_model,
172
+ repo_list_file, p, lambda_val, skip_dirs, False)
173
+ if mode == "Single files":
174
+ files.append(output_file_path)
175
+ files.append(output_yaml_path)
176
+ except Exception as e:
177
+ print(e)
178
+ progress(1, desc=f"Error occurred: {e}")
179
+ repo_url = None
180
+ if Path(output_model).exists():
181
+ if mode != "Single files": save_readme_md(output_model, repo_list_file, p, lambda_val)
182
+ if is_upload_sf:
183
+ if mode == "SDXL": files.append(convert_output_to_safetensors(output_model, hf_repo))
184
+ elif mode == "SD1.5": files.append(convert_output_to_safetensors_sd(output_model, hf_repo))
185
+ if is_upload:
186
+ if not is_repo_exists(new_repo_id): create_repo(new_repo_id)
187
+ repo_url = upload_dir_to_repo(new_repo_id, output_model)
188
+ else:
189
+ progress(1, desc="Merging failed.")
190
+ return gr.update(value=files), gr.update(value=repo_urls, choices=repo_urls), gr.update(visible=True)
191
+ if not repo_urls: repo_urls = []
192
+ if repo_url: repo_urls.append(repo_url)
193
+ md = "Your new Repo:<br>"
194
+ for u in repo_urls:
195
+ md += f"[{str(u).split('/')[-2]}/{str(u).split('/')[-1]}]({str(u)})<br>"
196
+ return gr.update(value=files), gr.update(value=repo_urls, choices=repo_urls), gr.update(value=md)
197
+
198
+ from convert_repo_to_safetensors_gr import convert_diffusers_to_safetensors
199
+ def convert_output_to_safetensors(output_dir: str, repo_name: str, progress=gr.Progress(track_tqdm=True)):
200
+ output_filename = f"{repo_name}.safetensors"
201
+ convert_diffusers_to_safetensors(output_dir, Path(output_dir, output_filename))
202
+ return output_filename
203
+
204
+ from convert_repo_to_safetensors_sd_gr import convert_diffusers_to_safetensors as convert_diffusers_to_safetensors_sd
205
+ def convert_output_to_safetensors_sd(output_dir: str, repo_name: str, progress=gr.Progress(track_tqdm=True)):
206
+ output_filename = f"{repo_name}.safetensors"
207
+ convert_diffusers_to_safetensors_sd(output_dir, Path(output_dir, output_filename))
208
+ return output_filename
209
+
210
+ def upload_repo_list(filepath: str, default_p: float, default_lambda_val: float):
211
+ yaml_dict = {}
212
+ if Path(filepath).suffix in [".yml", ".yaml"]:
213
+ yaml_dict = load_yaml_dict(filepath)
214
+ if yaml_dict is not None:
215
+ with open(merge_yaml_path, mode='w', encoding='utf-8') as file:
216
+ yaml.dump(yaml_dict, file, default_flow_style=False, allow_unicode=True)
217
+ else:
218
+ yaml_dict = repo_text_to_yaml_dict(filepath, default_p, default_lambda_val)
219
+ shutil.copy(filepath, merge_text_path)
220
+ yaml_str = yaml.dump(yaml_dict, allow_unicode=True)
221
+ md = f"""``` yaml
222
+ {yaml_str}
223
+ ```"""
224
+ return md
225
+
226
+ def save_readme_md(dir: str, yaml_path:str, default_p: float, default_lambda_val: float):
227
+ yaml_dict = {}
228
+ if Path(yaml_path).suffix in [".yml", ".yaml"]:
229
+ yaml_dict = load_yaml_dict(yaml_path)
230
+ else:
231
+ yaml_dict = repo_text_to_yaml_dict(yaml_path, default_p, default_lambda_val)
232
+ yaml_str = yaml.dump(yaml_dict, allow_unicode=True)
233
+ md = f"""---
234
+ license: other
235
+ language:
236
+ - en
237
+ library_name: diffusers
238
+ pipeline_tag: text-to-image
239
+ tags:
240
+ - text-to-image
241
+ ---
242
+ <br>Merged model.<br>
243
+ ## 🧩 Configuration
244
+ ``` yaml
245
+ {yaml_str}
246
+ ```"""
247
+ path = str(Path(dir, "README.md"))
248
+ with open(path, mode='w', encoding="utf-8") as f:
249
+ f.write(md)
requirements.txt ADDED
@@ -0,0 +1,13 @@
1
+ huggingface_hub
2
+ safetensors
3
+ transformers
4
+ accelerate
5
+ git+https://github.com/huggingface/diffusers
6
+ pytorch_lightning
7
+ peft
8
+ aria2
9
+ gdown
10
+ torch==2.2.0
11
+ numpy
12
+ GitPython
13
+ PyYAML