pugliathomas committed
Commit 90ce69b · verified · 1 Parent(s): 3847444

Create app.py

Files changed (1)
  1. app.py +318 -0
app.py ADDED
@@ -0,0 +1,318 @@
import gradio as gr
import spaces
import torch
import gc
from safetensors.torch import load_file, save_file
from tqdm import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextConfig
import os
import tempfile
import shutil
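
# Heuristic splitter for monolithic SDXL-style checkpoints: routes text-encoder
# tensors into two buckets ("clip_g" / "clip_l") by key name. SDXL checkpoints
# typically store CLIP-L under conditioner.embedders.0 and CLIP-G under
# conditioner.embedders.1, so this prefix stripping and routing is approximate.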
class QuantumCLIPExtractor:
    @classmethod
    def extract_from_checkpoint(cls, checkpoint_path: str) -> tuple[dict, dict]:
        state_dict = load_file(checkpoint_path)
        components = {"clip_g": {}, "clip_l": {}}

        for key in state_dict:
            clean_key = key.replace("conditioner.embedders.0.", "").replace("cond_stage_model.", "")
            if 'text_model.encoder.layers.23' in clean_key or 'text_projection' in clean_key:
                components["clip_g"][clean_key] = state_dict[key]
            elif 'text_model.encoder.layers' in clean_key:
                components["clip_l"][clean_key] = state_dict[key]

        return (
            cls.process_component(components["clip_g"]),
            cls.process_component(components["clip_l"])
        )
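
    # Best-effort remap of checkpoint key names toward the naming used by
    # Hugging Face's CLIPTextModel. These are plain string substitutions,
    # not a verified one-to-one mapping for every checkpoint layout.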
    @staticmethod
    def process_component(component: dict) -> dict:
        processed = {}
        replacements = {
            "layer_norm1": "self_attn_layer_norm",
            "layer_norm2": "final_layer_norm",
            "mlp.fc1": "fc1",
            "mlp.fc2": "fc2",
            "positional_embedding": "embeddings.position_embedding.weight",
            "token_embedding": "embeddings.token_embedding.weight"
        }

        for key in component:
            new_key = key
            for old, new in replacements.items():
                new_key = new_key.replace(old, new)
            processed[new_key] = component[key]
        return processed
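
# Rebuild a CLIPTextModel from the extracted tensors. Weights are loaded
# non-strictly on top of a freshly initialized openai/clip-vit-large-patch14
# config, so any keys that do not line up simply keep their random init.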
@spaces.GPU(duration=300)
def load_custom_clip(ckpt_path: str) -> CLIPTextModel:
    clip_g, clip_l = QuantumCLIPExtractor.extract_from_checkpoint(ckpt_path)
    merged_state = {**clip_g, **clip_l}
    config = CLIPTextConfig.from_pretrained("openai/clip-vit-large-patch14")
    text_encoder = CLIPTextModel(config)

    model_state = text_encoder.state_dict()
    # Keep only tensors whose name AND shape match; load_state_dict raises on
    # shape mismatches even with strict=False.
    filtered = {
        k: v for k, v in merged_state.items()
        if k in model_state and v.shape == model_state[k].shape
    }
    model_state.update(filtered)
    text_encoder.load_state_dict(model_state, strict=False)
    return text_encoder.eval().to("cuda")
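
# Frequency-domain merge of two weight tensors: rows are FFT'd, real and
# imaginary parts are blended with prompt-derived coefficients, and a
# caller-supplied "decoherence" mask falls back to a plain element-wise average.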
@spaces.GPU(duration=60)
def process_fft_chunked(param1_half, param2_half, hyper_out, decoherence_mask, chunk_size=32):
    orig_shape = param1_half.shape
    flat_shape = (-1, orig_shape[-1])
    flat1 = param1_half.view(flat_shape)
    flat2 = param2_half.view(flat_shape)
    flat_mask = decoherence_mask.view(flat_shape)
    processed_chunks = []
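
    # Process chunk_size rows at a time so peak GPU memory stays bounded;
    # finished chunks are moved back to the CPU in half precision.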
    for i in tqdm(range(0, flat1.shape[0], chunk_size), desc="Processing FFT chunks", leave=False):
        with torch.no_grad():
            chunk1 = flat1[i:i+chunk_size].float()
            chunk2 = flat2[i:i+chunk_size].float()
            mask_chunk = flat_mask[i:i+chunk_size].to('cuda', non_blocking=True)

            fft1 = torch.fft.rfft(chunk1, dim=-1)
            fft2 = torch.fft.rfft(chunk2, dim=-1)
            freq_dim = fft1.shape[-1]
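
            # Tile or truncate the hypernet output to one coefficient per
            # frequency bin, then broadcast it across the rows of this chunk.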
            if hyper_out.shape[-1] < freq_dim:
                coeff = hyper_out.repeat(1, freq_dim // hyper_out.shape[-1] + 1)[:, :freq_dim]
            else:
                coeff = hyper_out[:, :freq_dim]
            coeff = coeff.expand(chunk1.size(0), -1).float()
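
            # Separate sigmoid gates for the real (magnitude-like) and
            # imaginary (phase-like) parts; masked entries are overwritten
            # with the plain average after the inverse FFT.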
            magnitude_blend = torch.sigmoid(coeff * 5)
            phase_blend = torch.sigmoid(coeff * 3 - 1)

            blended_fft_real = magnitude_blend * fft1.real + (1 - magnitude_blend) * fft2.real
            blended_fft_imag = phase_blend * fft1.imag + (1 - phase_blend) * fft2.imag
            blended_fft = torch.complex(blended_fft_real, blended_fft_imag)

            blended_chunk = torch.fft.irfft(blended_fft, n=chunk1.shape[-1], dim=-1)
            avg = (chunk1 + chunk2) / 2
            blended_chunk[mask_chunk] = avg[mask_chunk]

            blended_chunk = blended_chunk.half().cpu()
            processed_chunks.append(blended_chunk)

            del chunk1, chunk2, fft1, fft2, blended_fft, avg, mask_chunk, magnitude_blend, phase_blend, coeff

    blended_flat = torch.cat(processed_chunks, dim=0)
    return blended_flat.view(orig_shape)
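
# Top-level merge: FFT-blend every 'weight' tensor, average everything else.
# Note that the hypernet below is randomly initialized on each call, so merge
# results are not reproducible across runs unless torch's RNG is seeded first.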
@spaces.GPU(duration=600)
def quantum_merge_models(base_model_path, secondary_model_path, clip_source, prompt, output_path, entanglement=0.7714, chunk_size=2048, add_vpred=False, progress=gr.Progress()):
    try:
        progress(0, desc="Loading models...")
        model1 = load_file(base_model_path)
        model2 = load_file(secondary_model_path)

        progress(0.1, desc="Loading CLIP encoder...")
        text_encoder = load_custom_clip(base_model_path if clip_source == "Base" else secondary_model_path)
        tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")

        progress(0.2, desc="Setting up hypernet...")
        hypernet = torch.nn.Sequential(
            torch.nn.Linear(768, 1024),
            torch.nn.GELU(),
            torch.nn.Linear(1024, 256),
            torch.nn.Tanh()
        ).cuda().half()
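
        # Encode the fusion prompt once; its pooled CLIP embedding is mapped
        # by the hypernet to the coefficients that steer the FFT blend.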
        with torch.no_grad():
            text_inputs = tokenizer(
                prompt,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_tensors="pt"
            )
            text_input_ids = text_inputs.input_ids.to("cuda")
            text_emb = text_encoder(text_input_ids).pooler_output.half()
            hyper_out = hypernet(text_emb).float()
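
        # Merge parameter by parameter: weight tensors get the FFT blend mixed
        # with a plain average by `entanglement`; other tensors are averaged.
        # abs(hash(prompt + key)) seeds the decoherence mask, but CPython salts
        # str hashes per process, so the same prompt can produce different
        # masks across runs unless PYTHONHASHSEED is fixed.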
        merged_model = {}
        keys = list(model1.keys())
        total_keys = len(keys)

        for idx, key in enumerate(keys):
            progress((0.3 + (idx / total_keys) * 0.6), desc=f"Merging parameters {idx+1}/{total_keys}")

            if key in model2:
                param1 = model1[key].cuda().half()
                param2 = model2[key].cuda().half()

                if 'weight' in key:
                    seed = abs(hash(prompt + key)) % (2**32)
                    torch.manual_seed(seed)
                    decoherence_mask = torch.rand(param1.shape, device='cpu') < 0.2

                    blended = process_fft_chunked(param1, param2, hyper_out, decoherence_mask, chunk_size)
                    merged = (blended.float() * entanglement +
                              (param1.cpu().float() * (1 - entanglement) +
                               param2.cpu().float() * (1 - entanglement)) / 2).half()
                else:
                    merged = (param1 + param2) / 2

                merged_model[key] = merged.cpu()
                del param1, param2, merged
                if 'weight' in key:
                    del blended
                gc.collect()
                torch.cuda.empty_cache()

            else:
                merged_model[key] = model1[key]
        progress(0.95, desc="Saving merged model...")
        save_file(merged_model, output_path)

        # Add v_pred tensor if requested
        if add_vpred:
            try:
                state_dict = load_file(output_path)
                state_dict['v_pred'] = torch.tensor([])
                vpred_path = output_path.replace('.safetensors', '_vpred.safetensors')
                save_file(state_dict, vpred_path)
                return True, "Merge successful! Created v-pred version.", vpred_path
            except Exception as e:
                return False, f"v_pred addition failed: {str(e)}", output_path

        return True, "Merge successful!", output_path
    except Exception as e:
        return False, f"Error: {str(e)}", None
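
# Gradio-facing wrapper: resolves uploaded file paths, runs the merge into a
# fresh temp directory, and returns (file, status). On success the temp dir is
# intentionally left in place so Gradio can serve the merged file for download.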
def wrapper(base_file, secondary_file, clip_source, prompt, entanglement, chunk_size, add_vpred, progress=gr.Progress()):
    try:
        if base_file is None or secondary_file is None:
            return None, "Please upload both models"

        # Create temporary output directory
        temp_dir = tempfile.mkdtemp()
        output_name = os.path.join(temp_dir, "merged_model.safetensors")

        # Get actual file paths from Gradio file objects
        base_path = base_file.name if hasattr(base_file, 'name') else base_file
        secondary_path = secondary_file.name if hasattr(secondary_file, 'name') else secondary_file

        success, message, final_path = quantum_merge_models(
            base_path,
            secondary_path,
            clip_source,
            prompt,
            output_name,
            entanglement,
            chunk_size,
            add_vpred,
            progress
        )

        if success and final_path and os.path.exists(final_path):
            return final_path, message
        else:
            # Clean up temp directory if merge failed
            shutil.rmtree(temp_dir, ignore_errors=True)
            return None, message

    except Exception as e:
        return None, f"Wrapper error: {str(e)}"
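
# Two-column Gradio UI: model uploads and merge settings on the left,
# download slot and status log on the right.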
def create_interface():
    with gr.Blocks(title="Quantum Model Merger", theme=gr.themes.Soft()) as interface:
        gr.Markdown("""
        # 🧪 Quantum Model Merger for SDXL

        Advanced SDXL model merger using quantum-inspired FFT blending with prompt-guided fusion.

        ## Instructions:
        1. Upload your base and secondary SDXL models (.safetensors format)
        2. Choose which model's CLIP to use for prompt encoding
        3. Enter a prompt to guide the merge (this affects how the models blend)
        4. Adjust parameters and click merge
        5. Download your merged model

        ⚠️ **Note:** This process requires significant GPU memory and may take 5-10 minutes for SDXL models.
        """)

        with gr.Row():
            with gr.Column():
                base_model = gr.File(
                    label="📁 Base Model (.safetensors)",
                    file_types=[".safetensors"],
                    type="filepath"
                )
                secondary_model = gr.File(
                    label="📁 Secondary Model (.safetensors)",
                    file_types=[".safetensors"],
                    type="filepath"
                )

                with gr.Row():
                    clip_source = gr.Radio(
                        ["Base", "Secondary"],
                        value="Base",
                        label="🎯 CLIP Source Model",
                        info="Which model's CLIP encoder to use for prompt processing"
                    )

                prompt = gr.Textbox(
                    label="✨ Fusion Prompt",
                    value="1girl, solo, best quality, masterpiece",
                    lines=3,
                    info="This prompt guides how the models blend together"
                )

                with gr.Accordion("⚙️ Advanced Settings", open=False):
                    entanglement = gr.Slider(
                        0.0, 1.0,
                        value=0.7714,
                        label="Entanglement Strength",
                        info="Higher = more FFT blending; lower = more averaging"
                    )
                    chunk_size = gr.Slider(
                        128, 4096,
                        value=2048,
                        step=128,
                        label="Chunk Size",
                        info="Lower = less memory usage, but slower"
                    )
                    vpred_check = gr.Checkbox(
                        label="Add v_pred tensor (for v-prediction models)",
                        value=False
                    )

                merge_btn = gr.Button("🚀 Start Merge", variant="primary", size="lg")
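
            # Right-hand column: merged-model download plus a status log.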
            with gr.Column():
                output_file = gr.File(
                    label="💾 Merged Model",
                    type="filepath"
                )
                logs = gr.Textbox(
                    label="📋 Status",
                    interactive=False,
                    lines=10,
                    value="Ready to merge..."
                )

                gr.Markdown("""
                ## Tips:
                - **Entanglement**: 0.77 is a good default. Higher values create more creative blends.
                - **Prompt**: Use prompts that represent the style/content you want to emphasize in the merge.
                - **Chunk Size**: Reduce if you encounter memory errors.
                - **V-Pred**: Only enable if you specifically need v-prediction support.
                """)
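
        # Wire the button to the merge wrapper; gr.Progress is injected into
        # the wrapper automatically, so it is not listed among the inputs.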
        merge_btn.click(
            wrapper,
            [base_model, secondary_model, clip_source, prompt, entanglement, chunk_size, vpred_check],
            [output_file, logs]
        )

    return interface

if __name__ == "__main__":
    interface = create_interface()
    interface.launch()