AbstractPhil committed on
Commit
14883b6
·
verified ·
1 Parent(s): 03cefb2

Create sd15_inference_mechanism.py

Files changed (1)
  1. sd15_inference_mechanism.py +242 -0
sd15_inference_mechanism.py ADDED
@@ -0,0 +1,242 @@
+ # ============================================================================
+ # SD 1.5 × MEMORY-CLIP-SEQ: Full Sequence Output
+ #
+ # The seq77 model produces (B, 77, 768), the same shape as CLIP's native
+ # last_hidden_state, so it is a drop-in replacement for SD's text encoder output.
+ #
+ # Comparisons:
+ #   A) Standard CLIP: caption truncated to 77 tokens
+ #   B) Seq77 model: full 576-token context → reconstructed 77-position sequence
+ #   C) Seq77 pooled + EOS inject: the v3 approach, but with the better pooled model
+ # ============================================================================
+
+ import torch
+ import torch.nn.functional as F
+ from diffusers import StableDiffusionPipeline, DDIMScheduler
+ from transformers import AutoModel, CLIPTextModel, CLIPTokenizer
+ from PIL import Image
+ import os
+ import numpy as np
+
+ SEQ_REPO = "AbstractPhil/geolip-clip-vit-large-patch14-ctx576-seq77"
+ SD15_REPO = "stable-diffusion-v1-5/stable-diffusion-v1-5"
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ DTYPE = torch.float16
+
+
+ # ══════════════════════════════════════════════════════════════════
+ # LOAD
+ # ══════════════════════════════════════════════════════════════════
+
+ print("Loading SD 1.5...")
+ pipe = StableDiffusionPipeline.from_pretrained(
+     SD15_REPO, torch_dtype=DTYPE, safety_checker=None)
+ pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+ pipe = pipe.to(DEVICE)
+
+ print("Loading Memory-CLIP-Seq...")
+ seq_model = AutoModel.from_pretrained(SEQ_REPO, trust_remote_code=True)
+ seq_model = seq_model.to(DEVICE).eval()
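+ # Note: trust_remote_code=True pulls the custom Memory-CLIP-Seq modeling code
+ # from SEQ_REPO; the model is loaded alongside (not instead of) the pipeline's
+ # own CLIP text encoder so the two can be compared and blended below.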
+
+ tokenizer = pipe.tokenizer
+ text_encoder = pipe.text_encoder
+ unet = pipe.unet
+ vae = pipe.vae
+ scheduler = pipe.scheduler
+ print("Ready.")
+
+
+ # ══════════════════════════════════════════════════════════════════
+ # SEGMENTATION
+ # ══════════════════════════════════════════════════════════════════
+
+ def segment_text(text, clip_tokenizer, max_content=18, overlap=4, max_segments=32):
+     full_tokens = clip_tokenizer.encode(text, add_special_tokens=False)
+     segments, stride, pos = [], max_content - overlap, 0
+     while pos < len(full_tokens) and len(segments) < max_segments:
+         end = min(pos + max_content, len(full_tokens))
+         chunk = full_tokens[pos:end]
+         sos = clip_tokenizer.bos_token_id or 49406
+         eos = clip_tokenizer.eos_token_id or 49407
+         ids = [sos] + chunk + [eos]
+         n_pad = 77 - len(ids)
+         ids = (ids + [0] * max(n_pad, 0))[:77]
+         mask = ([1] * min(len(chunk) + 2, 77) + [0] * max(n_pad, 0))[:77]
+         segments.append({
+             "input_ids": torch.tensor(ids, dtype=torch.long),
+             "attention_mask": torch.tensor(mask, dtype=torch.long),
+         })
+         if end >= len(full_tokens):
+             break
+         pos += stride
+     return segments
+
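+ # With the defaults above, segment_text emits up to max_segments = 32 windows of
+ # max_content = 18 content tokens each (32 × 18 = 576 window slots, matching the
+ # "ctx576" in SEQ_REPO); consecutive windows share overlap = 4 tokens, so the
+ # sliding stride is 14. A quick, purely illustrative sanity check:
+ #
+ #     segs = segment_text("a very long caption ...", tokenizer)
+ #     print(len(segs), segs[0]["input_ids"].shape)   # n_windows, torch.Size([77])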
+
+ # ══════════════════════════════════════════════════════════════════
+ # ENCODING METHODS
+ # ══════════════════════════════════════════════════════════════════
+
+ @torch.no_grad()
+ def encode_standard_clip(prompt):
+     """Standard SD 1.5: truncate → (1, 77, 768)"""
+     inputs = tokenizer(prompt, max_length=77, padding="max_length",
+                        truncation=True, return_tensors="pt").to(DEVICE)
+     return text_encoder(input_ids=inputs.input_ids).last_hidden_state
+
+
+ @torch.no_grad()
+ def encode_seq77(prompt):
+     """
+     Seq77 model: full caption → segmented → memory → reconstruct → (1, 77, 768)
+     Direct drop-in replacement for CLIP's last_hidden_state.
+     """
+     out = seq_model(texts=[prompt], output_sequence=True)
+     return out.last_hidden_state.to(DTYPE)  # (1, 77, 768)
+
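+ # Because the shape matches CLIP's last_hidden_state, this tensor should also
+ # drop straight into the stock diffusers pipeline instead of the manual loop
+ # below; a minimal sketch, assuming the standard SD 1.5 call signature:
+ #
+ #     emb = encode_seq77("a castle on a cliff at sunset")
+ #     img = pipe(prompt_embeds=emb,
+ #                negative_prompt_embeds=encode_standard_clip(""),
+ #                num_inference_steps=30).images[0]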
+
+ @torch.no_grad()
+ def encode_seq77_pooled_eos_inject(prompt, alpha=1.0):
+     """
+     Hybrid: standard CLIP sequence + seq77 pooled embedding at EOS-1.
+     Uses the seq77 model's improved pooled output (m_acc=0.957).
+     """
+     clip_embeds = encode_standard_clip(prompt).clone()
+
+     # Get pooled from seq model
+     pooled = seq_model.encode(prompt)  # (768,)
+     pooled = pooled.unsqueeze(0)       # (1, 768)
+
+     # Find EOS
+     inputs = tokenizer(prompt, max_length=77, padding="max_length",
+                        truncation=True, return_tensors="pt")
+     eos_positions = (inputs.input_ids == 49407).nonzero(as_tuple=True)[1]
+     eos_pos = eos_positions[0].item() if len(eos_positions) > 0 else 76
+     inject_pos = max(eos_pos - 1, 1)
+
+     orig = clip_embeds[:, inject_pos, :]
+     clip_embeds[:, inject_pos, :] = (orig + alpha * (pooled - orig)).to(DTYPE)
+     return clip_embeds
+
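+ # With the default alpha=1.0 the pooled vector fully replaces the embedding at
+ # EOS-1; smaller alpha values interpolate between the original CLIP embedding
+ # and the seq77 pooled embedding at that single position.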
+
+ @torch.no_grad()
+ def encode_seq77_blended(prompt, alpha=0.5):
+     """
+     Blend: alpha × seq77_sequence + (1 - alpha) × standard_clip_sequence.
+     """
+     clip_embeds = encode_standard_clip(prompt)
+     seq_embeds = encode_seq77(prompt)
+     blended = clip_embeds + alpha * (seq_embeds - clip_embeds)
+     return blended
+
+
+ # ══════════════════════════════════════════════════════════════════
+ # GENERATION
+ # ══════════════════════════════════════════════════════════════════
+
+ @torch.no_grad()
+ def generate(prompt_embeds, negative_embeds=None,
+              steps=30, cfg=7.5, seed=42, h=512, w=512):
+     gen = torch.Generator(device=DEVICE).manual_seed(seed)
+     if negative_embeds is None:
+         negative_embeds = torch.zeros_like(prompt_embeds)
+     # Unconditional embeddings first, conditional second (matches chunk order below)
+     text_emb = torch.cat([negative_embeds, prompt_embeds])
+     latents = torch.randn(
+         (1, unet.config.in_channels, h // 8, w // 8),
+         generator=gen, device=DEVICE, dtype=DTYPE)
+     latents = latents * scheduler.init_noise_sigma
+     scheduler.set_timesteps(steps)
+     for t in scheduler.timesteps:
+         # Duplicate the latents so one UNet pass covers uncond + cond
+         lat_in = scheduler.scale_model_input(torch.cat([latents] * 2), t)
+         pred = unet(lat_in, t, encoder_hidden_states=text_emb).sample
+         pu, pt = pred.chunk(2)
+         # Classifier-free guidance
+         pred = pu + cfg * (pt - pu)
+         latents = scheduler.step(pred, t, latents).prev_sample
+     # Decode latents to an image
+     latents = latents / vae.config.scaling_factor
+     img = vae.decode(latents).sample
+     img = (img / 2 + 0.5).clamp(0, 1)
+     img = img.cpu().permute(0, 2, 3, 1).float().numpy()
+     return Image.fromarray((img[0] * 255).astype("uint8"))
+
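+ # Example usage (illustrative; the sweep below drives generate() the same way):
+ #
+ #     neg = encode_standard_clip("")
+ #     img = generate(encode_seq77("a castle on a cliff at sunset"), neg,
+ #                    steps=30, cfg=7.5, seed=42)
+ #     img.save("outputs/seq77_example.png")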
+
+ # ============================================================================
+ # PURE BLEND: Standard CLIP ↔ Seq77 at α ∈ {0.0, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0}
+ #
+ # output = (1 - α) × CLIP_sequence + α × Seq77_sequence
+ #
+ # No EOS injection. No img2img. Just the raw blend, using the models and
+ # encoders loaded above.
+ # ============================================================================
+
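+ # Worked example of the blend formula at a few of the α values used below:
+ #   α = 0.0 -> pure standard CLIP sequence
+ #   α = 0.5 -> midpoint of the CLIP and Seq77 sequences
+ #   α = 1.0 -> pure Seq77 sequence
+ #   α = 2.0 -> extrapolation past Seq77: 2 × Seq77 - CLIP
+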
+ os.makedirs("outputs", exist_ok=True)
+
+ neg = encode_standard_clip("")
+
+ prompts = {
+     "castle": (
+         "A vast sweeping landscape of rolling green hills under dramatic "
+         "storm clouds with a lone oak tree in the foreground its branches "
+         "bent by wind casting long shadows across a field of wildflowers "
+         "in purple yellow and white while in the distance a medieval stone "
+         "castle sits atop a cliff overlooking a turbulent sea with waves "
+         "crashing against ancient rocks and seabirds wheeling overhead "
+         "against a sky painted in shades of grey and gold as the sun "
+         "breaks through the clouds illuminating the castle towers"
+     ),
+
+     "still_life": (
+         "A meticulously arranged still life painting in the Dutch Golden Age "
+         "style featuring a silver goblet overflowing with deep red wine next "
+         "to a half peeled lemon with its rind spiraling downward and a cracked "
+         "walnut revealing its inner flesh beside a porcelain plate holding "
+         "slices of rare roast beef garnished with fresh rosemary sprigs and "
+         "a small bouquet of wilting tulips in shades of pink and white all set "
+         "against a dark moody background with dramatic chiaroscuro lighting "
+         "that highlights the reflective surfaces and textures of each object "
+         "while casting deep shadows that add depth and mystery to the composition "
+         "with a single fly resting on the edge of the goblet and droplets of "
+         "condensation catching the light on the silver surface"
+     ),
+
+     "short": "A medieval castle on a cliff overlooking the sea at sunset",
+ }
+
+ alphas = [0.0, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0]
+
+ for name, prompt in prompts.items():
+     n_tokens = len(seq_model.clip_tokenizer.encode(prompt))
+     print(f"\n{'='*60}")
+     print(f"{name} ({n_tokens} tokens)")
+     print(f"{'='*60}")
+
+     clip_seq = encode_standard_clip(prompt)
+     mem_seq = encode_seq77(prompt)
+
+     # Log overall cosine between the two
+     cos = F.cosine_similarity(
+         clip_seq.float().mean(1), mem_seq.float().mean(1)).item()
+     print(f" CLIP ↔ Seq77 mean cosine: {cos:.4f}")
+
+     images = []
+     for alpha in alphas:
+         label = f"α={alpha:.2f}"
+         print(f" {label}...", end=" ", flush=True)
+
+         # Blend in float32, then cast back to the UNet dtype
+         blended = clip_seq.float() + alpha * (mem_seq.float() - clip_seq.float())
+         blended = blended.to(DTYPE)
+
+         img = generate(blended, neg, steps=50, seed=42)
+         img.save(f"outputs/blend_{name}_a{alpha:.2f}.png")
+         images.append((label, img))
+         print("done")
+
+     # Stitch the per-alpha images into one horizontal contact sheet
+     combined = Image.new("RGB", (512 * len(images), 512))
+     for i, (label, img) in enumerate(images):
+         combined.paste(img, (512 * i, 0))
+     combined.save(f"outputs/blend_{name}_combined.png")
+     print(f" Saved: outputs/blend_{name}_combined.png")
+     print(f" {' | '.join(l for l, _ in images)}")
+
+ print("\nDONE")