WhiteAiZ committed on
Commit
016a0c1
·
verified ·
1 Parent(s): 08704cb

Delete extensions-builtin/forge_preprocessor_reference

Browse files
extensions-builtin/forge_preprocessor_reference/scripts/forge_reference.py DELETED
@@ -1,231 +0,0 @@
1
- import torch
2
-
3
- from modules_forge.supported_preprocessor import Preprocessor, PreprocessorParameter
4
- from modules_forge.shared import add_supported_preprocessor
5
- from ldm_patched.modules.samplers import sampling_function
6
- import ldm_patched.ldm.modules.attention as attention
7
-
8
-
9
def sdp(q, k, v, transformer_options):
    """Run the optimized self-attention kernel on (q, k, v).

    Returns q unchanged when the batch is empty so callers never feed a
    zero-sized batch into the attention kernel.
    """
    if q.shape[0] == 0:
        return q

    head_count = transformer_options["n_heads"]
    return attention.optimized_attention(q, k, v, heads=head_count, mask=None)
14
-
15
-
16
def adain(x, target_std, target_mean):
    """Adaptive instance normalization: re-statistic x to the target moments.

    Normalizes x per-sample/per-channel over the spatial dims (2, 3) using
    population statistics (correction=0), then rescales to target_std and
    shifts to target_mean. An empty batch is passed through untouched.
    """
    if x.shape[0] == 0:
        return x

    mean = x.mean(dim=(2, 3), keepdim=True)
    std = x.std(dim=(2, 3), keepdim=True, correction=0)
    normalized = (x - mean) / std
    return normalized * target_std + target_mean
22
-
23
-
24
def zero_cat(a, b, dim):
    """Concatenate a and b along `dim`, skipping an empty-batch operand.

    If either tensor has batch size zero the other is returned as-is
    (identity, no copy); otherwise this is a plain torch.cat.
    """
    if a.shape[0] and b.shape[0]:
        return torch.cat([a, b], dim=dim)
    return b if a.shape[0] == 0 else a
30
-
31
-
32
class PreprocessorReference(Preprocessor):
    """'Reference' ControlNet-style preprocessor (attn and/or adain variants).

    Instead of using a control model, this unit records the reference image's
    internal UNet statistics (attn1 keys/values and/or per-block feature
    moments) during a throwaway "recording" pass, then injects them into the
    real sampling pass. `use_attn` enables attn1 key/value sharing;
    `use_adain` enables AdaIN-style feature re-statistics.
    """

    def __init__(self, name, use_attn=True, use_adain=True, priority=0):
        super().__init__()
        self.name = name
        self.use_attn = use_attn
        self.use_adain = use_adain
        self.sorting_priority = priority
        self.tags = ['Reference']
        # No resolution slider; single "Style Fidelity" slider in [0, 1].
        self.slider_resolution = PreprocessorParameter(visible=False)
        self.slider_1 = PreprocessorParameter(label='Style Fidelity', value=0.5, minimum=0.0, maximum=1.0, step=0.01, visible=True)
        self.show_control_mode = False
        self.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab = False
        # This preprocessor patches the UNet directly; no control model is loaded.
        self.do_not_need_model = True

        # Mutable state shared with the closures installed below:
        # is_recording_style is True only inside the recording pass driven by
        # conditioning_modifier; recorded_attn1/recorded_h are keyed by block
        # location and hold the reference statistics captured in that pass.
        self.is_recording_style = False
        self.recorded_attn1 = {}
        self.recorded_h = {}

    def process_before_every_sampling(self, process, cond, mask, *args, **kwargs):
        """Patch a cloned UNet with reference-recording/injection hooks.

        Encodes the reference image (`cond`) into latent space, then installs
        three closures on a cloned UNet:
          - conditioning_modifier: before each step in the active sigma range,
            runs a hidden sampling pass on a noised reference latent with
            `is_recording_style` set, so the other hooks record statistics;
          - block_proc: adain variant — records/injects per-block feature
            std/mean;
          - attn1_proc: attn variant — records/injects attn1 keys/values.
        Returns (cond, mask) unchanged.
        """
        unit = kwargs['unit']
        weight = float(unit.weight)
        style_fidelity = float(unit.threshold_a)
        start_percent = float(unit.guidance_start)
        end_percent = float(unit.guidance_end)

        if process.sd_model.is_sdxl:
            style_fidelity = style_fidelity ** 3.0  # sdxl is very sensitive to reference so we lower the weights

        vae = process.sd_model.forge_objects.vae
        # This is a powerful VAE with integrated memory management, bf16, and tiled fallback.

        # Reference image -> latent, then into the UNet's expected latent format.
        latent_image = vae.encode(cond.movedim(1, -1))
        latent_image = process.sd_model.forge_objects.unet.model.latent_format.process_in(latent_image)

        # Dedicated CPU RNG (seed+1) so the reference noise is reproducible and
        # independent of the main sampling noise.
        gen_seed = process.seeds[0] + 1
        gen_cpu = torch.Generator().manual_seed(gen_seed)

        unet = process.sd_model.forge_objects.unet.clone()
        # guidance_start/end percents -> sigma bounds of the active range.
        sigma_max = unet.model.model_sampling.percent_to_sigma(start_percent)
        sigma_min = unet.model.model_sampling.percent_to_sigma(end_percent)

        # Reset recordings from any previous sampling run.
        self.recorded_attn1 = {}
        self.recorded_h = {}

        def conditioning_modifier(model, x, timestep, uncond, cond, cond_scale, model_options, seed):
            # Outside the active sigma range: pass everything through untouched.
            sigma = timestep[0].item()
            if not (sigma_min <= sigma <= sigma_max):
                return model, x, timestep, uncond, cond, cond_scale, model_options, seed

            self.is_recording_style = True

            # Noise the reference latent to the current sigma and run a hidden
            # pass (cond_scale=1); its output is discarded — only the side
            # effect of block_proc/attn1_proc recording statistics matters.
            xt = latent_image.to(x) + torch.randn(x.size(), dtype=x.dtype, generator=gen_cpu).to(x) * sigma
            sampling_function(model, xt, timestep, uncond, cond, 1, model_options, seed)

            self.is_recording_style = False

            return model, x, timestep, uncond, cond, cond_scale, model_options, seed

        def block_proc(h, flag, transformer_options):
            # AdaIN hook: record (recording pass) or inject (real pass) the
            # per-block feature statistics. No-op unless adain is enabled.
            if not self.use_adain:
                return h

            if flag != 'after':
                return h

            location = transformer_options['block']

            sigma = transformer_options["sigmas"][0].item()
            if not (sigma_min <= sigma <= sigma_max):
                return h

            # Unit weight gates which blocks participate: higher weight lowers
            # the channel threshold, so more (shallower) blocks are affected.
            channel = int(h.shape[1])
            minimal_channel = 1500 - 1000 * weight

            if channel < minimal_channel:
                return h

            if self.is_recording_style:
                # Recording pass: stash (std, mean) over spatial dims for this block.
                self.recorded_h[location] = torch.std_mean(h, dim=(2, 3), keepdim=True, correction=0)
                return h
            else:
                # Real pass: re-statistic cond fully; blend uncond between its
                # original (strong fidelity) and re-statisticked (weak) forms.
                cond_indices = transformer_options['cond_indices']
                uncond_indices = transformer_options['uncond_indices']
                cond_or_uncond = transformer_options['cond_or_uncond']
                r_std, r_mean = self.recorded_h[location]

                h_c = h[cond_indices]
                h_uc = h[uncond_indices]

                o_c = adain(h_c, r_std, r_mean)
                o_uc_strong = h_uc
                o_uc_weak = adain(h_uc, r_std, r_mean)
                o_uc = o_uc_weak + (o_uc_strong - o_uc_weak) * style_fidelity

                # Reassemble the batch in the original cond/uncond order.
                recon = []
                for cx in cond_or_uncond:
                    if cx == 0:
                        recon.append(o_c)
                    else:
                        recon.append(o_uc)

                o = torch.cat(recon, dim=0)
                return o

        def attn1_proc(q, k, v, transformer_options):
            # Attention hook: record (recording pass) or inject (real pass) the
            # reference attn1 keys/values. Falls back to plain sdp otherwise.
            if not self.use_attn:
                return sdp(q, k, v, transformer_options)

            sigma = transformer_options["sigmas"][0].item()
            if not (sigma_min <= sigma <= sigma_max):
                return sdp(q, k, v, transformer_options)

            # (block type, block id, attn index) uniquely identifies this attn1.
            location = (transformer_options['block'][0], transformer_options['block'][1],
                        transformer_options['block_index'])

            # Same gating idea as block_proc but on the token feature dim,
            # with a steeper slope (1280) for the attn variant.
            channel = int(q.shape[2])
            minimal_channel = 1500 - 1280 * weight

            if channel < minimal_channel:
                return sdp(q, k, v, transformer_options)

            if self.is_recording_style:
                self.recorded_attn1[location] = (k, v)
                return sdp(q, k, v, transformer_options)
            else:
                # Support both index conventions: explicit cond/uncond index
                # lists (newer) or only cond_or_uncond flags (older), from
                # which the index lists are derived.
                if 'cond_indices' in transformer_options and 'uncond_indices' in transformer_options:
                    cond_indices = transformer_options['cond_indices']
                    uncond_indices = transformer_options['uncond_indices']
                    cond_or_uncond = transformer_options['cond_or_uncond']
                elif 'cond_or_uncond' in transformer_options:
                    cond_or_uncond = transformer_options['cond_or_uncond']
                    cond_indices = [i for i, x in enumerate(cond_or_uncond) if x == 0]
                    uncond_indices = [i for i, x in enumerate(cond_or_uncond) if x != 0]
                else:
                    # Handle the case where neither old nor new keys are present
                    return sdp(q, k, v, transformer_options)

                q_c = q[cond_indices]
                q_uc = q[uncond_indices]

                k_c = k[cond_indices]
                k_uc = k[uncond_indices]

                v_c = v[cond_indices]
                v_uc = v[uncond_indices]

                k_r, v_r = self.recorded_attn1[location]

                # Check if shapes are compatible for concatenation
                if k_c.shape[1:] != k_r.shape[1:] or v_c.shape[1:] != v_r.shape[1:]:
                    print(f"Shape mismatch: k_c={k_c.shape}, k_r={k_r.shape}, v_c={v_c.shape}, v_r={v_r.shape}")
                    return sdp(q, k, v, transformer_options)

                try:
                    # cond attends to its own tokens plus the reference tokens;
                    # uncond is blended between plain (strong) and reference-
                    # augmented (weak) attention by style_fidelity.
                    o_c = sdp(q_c, zero_cat(k_c, k_r, dim=1), zero_cat(v_c, v_r, dim=1), transformer_options)
                    o_uc_strong = sdp(q_uc, k_uc, v_uc, transformer_options)
                    o_uc_weak = sdp(q_uc, zero_cat(k_uc, k_r, dim=1), zero_cat(v_uc, v_r, dim=1), transformer_options)
                    o_uc = o_uc_weak + (o_uc_strong - o_uc_weak) * style_fidelity
                except RuntimeError as e:
                    # Best-effort fallback: log and return unmodified attention.
                    print(f"Error in attn1_proc: {str(e)}")
                    print(f"Shapes: q_c={q_c.shape}, k_c={k_c.shape}, k_r={k_r.shape}, v_c={v_c.shape}, v_r={v_r.shape}")
                    return sdp(q, k, v, transformer_options)

                # Reassemble the batch in the original cond/uncond order.
                recon = []
                for cx in cond_or_uncond:
                    if cx == 0:
                        recon.append(o_c)
                    else:
                        recon.append(o_uc)

                o = torch.cat(recon, dim=0)
                return o

        unet.add_block_modifier(block_proc)
        unet.add_conditioning_modifier(conditioning_modifier)
        unet.set_model_replace_all(attn1_proc, 'attn1')

        # Publish the patched clone; the original UNet object is left untouched.
        process.sd_model.forge_objects.unet = unet

        return cond, mask
212
-
213
-
214
# Register the three reference variants; only 'reference_only' gets an
# elevated sorting priority.
_REFERENCE_VARIANTS = (
    dict(name='reference_only', use_attn=True, use_adain=False, priority=100),
    dict(name='reference_adain', use_attn=False, use_adain=True),
    dict(name='reference_adain+attn', use_attn=True, use_adain=True),
)

for _variant in _REFERENCE_VARIANTS:
    add_supported_preprocessor(PreprocessorReference(**_variant))