import gradio as gr
import spaces
import torch
import gc
from safetensors.torch import load_file, save_file
from tqdm import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextConfig
import os
import tempfile
import shutil

class QuantumCLIPExtractor:
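    """Heuristically pulls CLIP text-encoder tensors out of a single-file SD/SDXL checkpoint."""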
    @classmethod
    def extract_from_checkpoint(cls, checkpoint_path: str) -> tuple[dict, dict]:
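        """Load a .safetensors checkpoint and split its text-encoder tensors into two buckets.

        Keys touching encoder layer 23 or the text projection are treated as "clip_g";
        all other encoder-layer keys go to "clip_l". Both buckets are renamed via
        process_component before being returned as (clip_g, clip_l).
        """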
        state_dict = load_file(checkpoint_path)
        components = {"clip_g": {}, "clip_l": {}}
        
        for key in state_dict:
            clean_key = key.replace("conditioner.embedders.0.", "").replace("cond_stage_model.", "")
            if 'text_model.encoder.layers.23' in clean_key or 'text_projection' in clean_key:
                components["clip_g"][clean_key] = state_dict[key]
            elif 'text_model.encoder.layers' in clean_key:
                components["clip_l"][clean_key] = state_dict[key]
                
        return (
            cls.process_component(components["clip_g"]),
            cls.process_component(components["clip_l"])
        )

    @staticmethod
    def process_component(component: dict) -> dict:
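        """Rename tensor keys according to a fixed substitution table and return the renamed dict."""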
        processed = {}
        replacements = {
            "layer_norm1": "self_attn_layer_norm",
            "layer_norm2": "final_layer_norm",
            "mlp.fc1": "fc1",
            "mlp.fc2": "fc2",
            "positional_embedding": "embeddings.position_embedding.weight",
            "token_embedding": "embeddings.token_embedding.weight"
        }
        
        for key in component:
            new_key = key
            for old, new in replacements.items():
                new_key = new_key.replace(old, new)
            processed[new_key] = component[key]
        return processed

@spaces.GPU(duration=300)
def load_custom_clip(ckpt_path: str) -> CLIPTextModel:
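    """Build a CLIPTextModel with the clip-vit-large-patch14 config, copy in any checkpoint
    tensors whose names match its state dict, and return it in eval mode on CUDA.
    Tensors without a matching name keep their random initialization (non-strict load).
    """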
    clip_g, clip_l = QuantumCLIPExtractor.extract_from_checkpoint(ckpt_path)
    merged_state = {**clip_g, **clip_l}
    config = CLIPTextConfig.from_pretrained("openai/clip-vit-large-patch14")
    text_encoder = CLIPTextModel(config)
    
    model_state = text_encoder.state_dict()
    filtered = {k: v for k, v in merged_state.items() if k in model_state}
    model_state.update(filtered)
    text_encoder.load_state_dict(model_state, strict=False)
    return text_encoder.eval().to("cuda")

@spaces.GPU(duration=60)
def process_fft_chunked(param1_half, param2_half, hyper_out, decoherence_mask, chunk_size=32):
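    """Blend two half-precision parameter tensors in the frequency domain, chunk by chunk.

    Each chunk is run through a real FFT along the last dimension; two sigmoid gates derived
    from the hypernet output (`magnitude_blend` for the real parts, `phase_blend` for the
    imaginary parts) mix the spectra of the two models before the inverse FFT. Positions
    selected by `decoherence_mask` fall back to the plain mean of the two chunks.
    Returns a half-precision CPU tensor with the original shape.
    """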
    orig_shape = param1_half.shape
    flat_shape = (-1, orig_shape[-1])
    flat1 = param1_half.view(flat_shape)
    flat2 = param2_half.view(flat_shape)
    flat_mask = decoherence_mask.view(flat_shape)
    processed_chunks = []
    
    for i in tqdm(range(0, flat1.shape[0], chunk_size), desc="Processing FFT chunks", leave=False):
        with torch.no_grad():
            chunk1 = flat1[i:i+chunk_size].float()
            chunk2 = flat2[i:i+chunk_size].float()
            mask_chunk = flat_mask[i:i+chunk_size].to('cuda', non_blocking=True)

            fft1 = torch.fft.rfft(chunk1, dim=-1)
            fft2 = torch.fft.rfft(chunk2, dim=-1)
            freq_dim = fft1.shape[-1]

            if hyper_out.shape[-1] < freq_dim:
                coeff = hyper_out.repeat(1, freq_dim // hyper_out.shape[-1] + 1)[:, :freq_dim]
            else:
                coeff = hyper_out[:, :freq_dim]
            coeff = coeff.expand(chunk1.size(0), -1).float()

            magnitude_blend = torch.sigmoid(coeff * 5)
            phase_blend = torch.sigmoid(coeff * 3 - 1)

            blended_fft_real = magnitude_blend * fft1.real + (1 - magnitude_blend) * fft2.real
            blended_fft_imag = phase_blend * fft1.imag + (1 - phase_blend) * fft2.imag
            blended_fft = torch.complex(blended_fft_real, blended_fft_imag)

            blended_chunk = torch.fft.irfft(blended_fft, n=chunk1.shape[-1], dim=-1)
            avg = (chunk1 + chunk2) / 2
            blended_chunk[mask_chunk] = avg[mask_chunk]

            blended_chunk = blended_chunk.half().cpu()
            processed_chunks.append(blended_chunk)

            del chunk1, chunk2, fft1, fft2, blended_fft, avg, mask_chunk, magnitude_blend, phase_blend, coeff
    
    blended_flat = torch.cat(processed_chunks, dim=0)
    return blended_flat.view(orig_shape)

@spaces.GPU(duration=600)
def quantum_merge_models(base_model_path, secondary_model_path, clip_source, prompt, output_path, entanglement=0.7714, chunk_size=2048, add_vpred=False, progress=gr.Progress()):
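    """Merge two checkpoints guided by a prompt.

    The prompt is encoded with the selected model's CLIP text encoder and projected through a
    small, randomly initialized hypernetwork to produce blend coefficients. Weight tensors are
    merged by interpolating an FFT blend (process_fft_chunked) against a plain average using
    `entanglement`; all other shared tensors are simply averaged. The merged state dict is
    saved to `output_path`, optionally with an extra empty `v_pred` tensor, and the function
    returns (success, message, path).
    """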
    try:
        progress(0, desc="Loading models...")
        model1 = load_file(base_model_path)
        model2 = load_file(secondary_model_path)
        
        progress(0.1, desc="Loading CLIP encoder...")
        text_encoder = load_custom_clip(base_model_path if clip_source == "Base" else secondary_model_path)
        tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
        
        progress(0.2, desc="Setting up hypernet...")
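        # Small hypernetwork mapping the 768-dim prompt embedding to 256 blend coefficients.
        # It is randomly initialized on every run, so the prompt conditioning is a fixed random projection.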
        hypernet = torch.nn.Sequential(
            torch.nn.Linear(768, 1024),
            torch.nn.GELU(),
            torch.nn.Linear(1024, 256),
            torch.nn.Tanh()
        ).cuda().half()

        with torch.no_grad():
            text_inputs = tokenizer(
                prompt,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_tensors="pt"
            )
            text_input_ids = text_inputs.input_ids.to("cuda")
            text_emb = text_encoder(text_input_ids).pooler_output.half()
            hyper_out = hypernet(text_emb).float()

        merged_model = {}
        keys = list(model1.keys())
        total_keys = len(keys)
        
        for idx, key in enumerate(keys):
            progress((0.3 + (idx / total_keys) * 0.6), desc=f"Merging parameters {idx+1}/{total_keys}")
            
            if key in model2:
                param1 = model1[key].cuda().half()
                param2 = model2[key].cuda().half()

                if 'weight' in key:
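                    # Prompt- and key-dependent decoherence mask (~20% of positions fall back to plain averaging).
                    # Note: Python's built-in hash() is salted per process, so the mask is only reproducible
                    # within a single run unless PYTHONHASHSEED is fixed.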
                    seed = abs(hash(prompt + key)) % (2**32)
                    torch.manual_seed(seed)
                    decoherence_mask = torch.rand(param1.shape, device='cpu') < 0.2

                    blended = process_fft_chunked(param1, param2, hyper_out, decoherence_mask, chunk_size)
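                    # Interpolate between the FFT blend and the plain average of the two weights,
                    # weighted by `entanglement`.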
                    merged = (blended.float() * entanglement + 
                             (param1.cpu().float() * (1 - entanglement) + 
                              param2.cpu().float() * (1 - entanglement)) / 2).half()
                else:
                    merged = (param1 + param2) / 2

                merged_model[key] = merged.cpu()
                del param1, param2, merged
                if 'weight' in key:
                    del blended
                gc.collect()
                torch.cuda.empty_cache()

            else:
                merged_model[key] = model1[key]

        progress(0.95, desc="Saving merged model...")
        save_file(merged_model, output_path)
        
        # Add v_pred tensor if requested
        if add_vpred:
            try:
                state_dict = load_file(output_path)
                state_dict['v_pred'] = torch.tensor([])
                vpred_path = output_path.replace('.safetensors', '_vpred.safetensors')
                save_file(state_dict, vpred_path)
                return True, f"Merge successful! Created v-pred version.", vpred_path
            except Exception as e:
                return False, f"v_pred addition failed: {str(e)}", output_path

        return True, f"Merge successful!", output_path
    except Exception as e:
        return False, f"Error: {str(e)}", None

def wrapper(base_file, secondary_file, clip_source, prompt, entanglement, chunk_size, add_vpred, progress=gr.Progress()):
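    """Gradio callback: validate uploads, resolve file paths, run the merge into a temp
    directory, and return (output_path, status_message) for the UI.
    """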
    try:
        if base_file is None or secondary_file is None:
            return None, "Please upload both models"
        
        # Create temporary output directory
        temp_dir = tempfile.mkdtemp()
        output_name = os.path.join(temp_dir, "merged_model.safetensors")
        
        # Get actual file paths from Gradio file objects
        base_path = base_file.name if hasattr(base_file, 'name') else base_file
        secondary_path = secondary_file.name if hasattr(secondary_file, 'name') else secondary_file
        
        success, message, final_path = quantum_merge_models(
            base_path,
            secondary_path,
            clip_source,
            prompt,
            output_name,
            entanglement,
            chunk_size,
            add_vpred,
            progress
        )
        
        if success and final_path and os.path.exists(final_path):
            return final_path, message
        else:
            # Clean up temp directory if merge failed
            shutil.rmtree(temp_dir, ignore_errors=True)
            return None, message
            
    except Exception as e:
        return None, f"Wrapper error: {str(e)}"

def create_interface():
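    """Build the Gradio Blocks UI for the merger."""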
    with gr.Blocks(title="Quantum Model Merger", theme=gr.themes.Soft()) as interface:
        gr.Markdown("""
        # πŸ§ͺ Quantum Model Merger for SDXL
        
        Advanced SDXL model merger using quantum-inspired FFT blending with prompt-guided fusion.
        
        ## Instructions:
        1. Upload your base and secondary SDXL models (.safetensors format)
        2. Choose which model's CLIP to use for prompt encoding
        3. Enter a prompt to guide the merge (this affects how models blend)
        4. Adjust parameters and click merge
        5. Download your merged model
        
        ⚠️ **Note:** This process requires significant GPU memory and may take 5-10 minutes for SDXL models.
        """)
        
        with gr.Row():
            with gr.Column():
                base_model = gr.File(
                    label="πŸ“ Base Model (.safetensors)",
                    file_types=[".safetensors"],
                    type="filepath"
                )
                secondary_model = gr.File(
                    label="πŸ“ Secondary Model (.safetensors)",
                    file_types=[".safetensors"],
                    type="filepath"
                )
                
                with gr.Row():
                    clip_source = gr.Radio(
                        ["Base", "Secondary"], 
                        value="Base",
                        label="🎯 CLIP Source Model",
                        info="Which model's CLIP encoder to use for prompt processing"
                    )
                
                prompt = gr.Textbox(
                    label="✨ Fusion Prompt",
                    value="1girl, solo, best quality, masterpiece",
                    lines=3,
                    info="This prompt guides how the models blend together"
                )
                
                with gr.Accordion("βš™οΈ Advanced Settings", open=False):
                    entanglement = gr.Slider(
                        0.0, 1.0, 
                        value=0.7714,
                        label="Entanglement Strength",
                        info="Higher = more FFT blending, Lower = more averaging"
                    )
                    chunk_size = gr.Slider(
                        128, 4096, 
                        value=2048, 
                        step=128,
                        label="Chunk Size",
                        info="Lower = less memory usage but slower"
                    )
                    vpred_check = gr.Checkbox(
                        label="Add v_pred tensor (for v-prediction models)",
                        value=False
                    )
                
                merge_btn = gr.Button("πŸš€ Start Merge", variant="primary", size="lg")

            with gr.Column():
                output_file = gr.File(
                    label="πŸ’Ύ Merged Model",
                    type="filepath"
                )
                logs = gr.Textbox(
                    label="πŸ“‹ Status", 
                    interactive=False, 
                    lines=10,
                    value="Ready to merge..."
                )

        gr.Markdown("""
        ## Tips:
        - **Entanglement**: 0.77 is a good default. Higher values create more creative blends.
        - **Prompt**: Use prompts that represent the style/content you want to emphasize in the merge.
        - **Chunk Size**: Reduce if you encounter memory errors.
        - **V-Pred**: Only enable if you specifically need v-prediction support.
        """)

        merge_btn.click(
            wrapper,
            [base_model, secondary_model, clip_source, prompt, entanglement, chunk_size, vpred_check],
            [output_file, logs]
        )
    
    return interface

if __name__ == "__main__":
    interface = create_interface()
    interface.launch()