dikdimon commited on
Commit
16205ab
·
verified ·
1 Parent(s): 448a955

Update webUI_ExtraSchedulers/scripts/extra_schedulers.py

Browse files
webUI_ExtraSchedulers/scripts/extra_schedulers.py CHANGED
@@ -1,432 +1,441 @@
1
- import gradio
2
- import math, numpy
3
- import torch
4
- from modules import scripts, shared
5
-
6
- # Python 3.10+, PyTorch 2.1+, NumPy 1.24+
7
- def get_sigmas_oss(n, sigma_min, sigma_max, device):
8
- """
9
- Optimal Steps schedule (OSS).
10
- Исправлено:
11
- - Больше нет обращения к несуществующей переменной `sigmas`.
12
- - Всегда возвращаем тензор float32 на переданном `device`.
13
- - Порядок веток по типу модели: SD3/Flux → SDXL → общий (SD1/2).
14
- Примечание: пресеты подобраны «в абсолютных» единицах под семейства моделей,
15
- поэтому sigma_min/sigma_max здесь намеренно не используются.
16
- """
17
- import numpy
18
- import torch
19
- from modules import shared
20
-
21
- def loglinear_interp(values: list[float], num_steps: int) -> numpy.ndarray:
22
- """Лог-линейная интерполяция убывающей последовательности до num_steps."""
23
- arr = numpy.asarray(values, dtype=float)
24
- xs = numpy.linspace(0.0, 1.0, arr.shape[0])
25
- ys = numpy.log(arr[::-1]) # в возрастающую + логарифм
26
- new_xs = numpy.linspace(0.0, 1.0, num_steps)
27
- new_ys = numpy.interp(new_xs, xs, ys) # интерполяция в лог-пространстве
28
- out = numpy.exp(new_ys)[::-1].copy() # обратно и снова убывающая
29
- return out
30
-
31
- m = shared.sd_model
32
-
33
- # 1) Флоу-семейство (SD3/Flux) нормализованный пресет ~[1..0]
34
- if getattr(m, "is_sd3", False) or getattr(m, "is_flux", False):
35
- base_sigmas = [0.9968, 0.9886, 0.9819, 0.975, 0.966, 0.9471, 0.9158, 0.8287, 0.5512, 0.2808, 0.001]
36
-
37
- # 2) SDXL свой AYS11 пресет
38
- elif getattr(m, "is_sdxl", False):
39
- base_sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029]
40
-
41
- # 3) SD1.x/SD2.x и прочие — общий AYS11
42
- else:
43
- base_sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029]
44
-
45
- # Подгоняем длину к n и добавляем терминальный 0.0 (итого n+1 значений)
46
- if n != len(base_sigmas):
47
- sigmas_np = loglinear_interp(base_sigmas, n)
48
- sigmas_np = numpy.append(sigmas_np, [0.0])
49
- else:
50
- sigmas_np = numpy.asarray(base_sigmas + [0.0], dtype=float)
51
-
52
- # Единый путь возврата: float32 на переданном device
53
- return torch.as_tensor(sigmas_np, dtype=torch.float32, device=device)
54
-
55
-
56
-
57
-
58
- def cosine_scheduler (n, sigma_min, sigma_max, device):
59
- sigmas = torch.zeros(n, device=device)
60
- if n == 1:
61
- sigmas[0] = sigma_max ** 0.5
62
- else:
63
- for x in range(n):
64
- p = x / (n-1)
65
- C = sigma_min + 0.5*(sigma_max-sigma_min)*(1 - math.cos(math.pi*(1 - p**0.5)))
66
- sigmas[x] = C
67
- return torch.cat([sigmas, sigmas.new_zeros([1])])
68
-
69
- def cosexpblend_boost_scheduler (n, sigma_min, sigma_max, device):
70
- sigmas = []
71
- if n == 1:
72
- sigmas.append(sigma_max ** 0.5)
73
- else:
74
- K = (sigma_min / sigma_max)**(1/(n-1))
75
- E = sigma_max
76
- detail = numpy.interp(numpy.linspace(0, 1, n), numpy.linspace(0, 1, 5), [1.0, 1.0, 1.27, 1.0, 1.0])
77
- for x in range(n):
78
- p = x / (n-1)
79
- C = sigma_min + 0.5*(sigma_max-sigma_min)*(1 - math.cos(math.pi*(1 - p**0.5)))
80
- sigmas.append(detail[x] * (C + p * (E - C)))
81
- E *= K
82
-
83
- sigmas += [0.0]
84
- return torch.FloatTensor(sigmas).to(device)
85
-
86
- def cosexpblend_scheduler (n, sigma_min, sigma_max, device):
87
- sigmas = []
88
- if n == 1:
89
- sigmas.append(sigma_max ** 0.5)
90
- else:
91
- K = (sigma_min / sigma_max)**(1/(n-1))
92
- E = sigma_max
93
- for x in range(n):
94
- p = x / (n-1)
95
- C = sigma_min + 0.5*(sigma_max-sigma_min)*(1 - math.cos(math.pi*(1 - p**0.5)))
96
- sigmas.append(C + p * (E - C))
97
- E *= K
98
- sigmas += [0.0]
99
- return torch.FloatTensor(sigmas).to(device)
100
-
101
- ## phi scheduler modified from original by @extraltodeus
102
- def phi_scheduler(n, sigma_min, sigma_max, device):
103
- sigmas = torch.zeros(n, device=device)
104
- if n == 1:
105
- sigmas[0] = sigma_max ** 0.5
106
- else:
107
- phi = (1 + 5**0.5) / 2
108
- for x in range(n):
109
- sigmas[x] = sigma_min + (sigma_max-sigma_min)*((1-x/(n-1))**(phi*phi))
110
- return torch.cat([sigmas, sigmas.new_zeros([1])])
111
-
112
- def get_sigmas_vp(n, sigma_min, sigma_max, device='cpu'):
113
- """Constructs a continuous VP noise schedule."""
114
-
115
- beta_d = 19.9
116
- beta_min = 0.1
117
- eps_s = 1e-3
118
-
119
- t = torch.linspace(1, eps_s, n, device=device)
120
- sigmas = torch.sqrt(torch.exp(beta_d * t ** 2 / 2 + beta_min * t) - 1)
121
- return torch.cat([sigmas, sigmas.new_zeros([1])])
122
-
123
- def get_sigmas_laplace(n, sigma_min, sigma_max, device='cpu'):
124
- """Constructs the noise schedule proposed by Tiankai et al. (2024). """
125
- mu = 0.
126
- beta = 0.5
127
- epsilon = 1e-5 # avoid log(0)
128
- x = torch.linspace(0, 1, n, device=device)
129
- clamp = lambda x: torch.clamp(x, min=sigma_min, max=sigma_max)
130
- lmb = mu - beta * torch.sign(0.5-x) * torch.log(1 - 2 * torch.abs(0.5-x) + epsilon)
131
- sigmas = clamp(torch.exp(lmb))
132
- return torch.cat([sigmas, sigmas.new_zeros([1])])
133
-
134
-
135
-
136
- def get_sigmas_sinusoidal_sf(n, sigma_min, sigma_max, device='cpu'):
137
- """Constructs a sinusoidal noise schedule."""
138
- sf = 3.5
139
- x = torch.linspace(0, 1, n, device=device)
140
- sigmas = (sigma_min + (sigma_max - sigma_min) * (1 - torch.sin(torch.pi / 2 * x)))/sigma_max
141
- sigmas = sigmas**sf
142
- sigmas = sigmas * sigma_max
143
- return torch.cat([sigmas, sigmas.new_zeros([1])])
144
-
145
- def get_sigmas_invcosinusoidal_sf(n, sigma_min, sigma_max, device='cpu'):
146
- """Constructs a sinusoidal noise schedule."""
147
- sf = 3.5
148
- x = torch.linspace(0, 1, n, device=device)
149
- sigmas = (sigma_min + (sigma_max - sigma_min) * (0.5*(torch.cos(x * math.pi) + 1)))/sigma_max
150
- sigmas = sigmas**sf
151
- sigmas = sigmas * sigma_max
152
- return torch.cat([sigmas, sigmas.new_zeros([1])])
153
-
154
- def get_sigmas_react_cosinusoidal_dynsf(n, sigma_min, sigma_max, device='cpu'):
155
- """Constructs a sinusoidal noise schedule."""
156
- sf = 2.15
157
- x = torch.linspace(0, 1, n, device=device)
158
- sigmas = (sigma_min+(sigma_max-sigma_min)*(torch.cos(x*(torch.pi/2))))/sigma_max
159
- sigmas = sigmas**(sf*(n*x/n))
160
- sigmas = sigmas * sigma_max
161
- return torch.cat([sigmas, sigmas.new_zeros([1])])
162
-
163
- def get_sigmas_karras_dynamic(n, sigma_min, sigma_max, device='cpu'):
164
- """Constructs the noise schedule of Karras et al. (2022)."""
165
- rho = 7.
166
- ramp = torch.linspace(0, 1, n, device=device)
167
- min_inv_rho = sigma_min ** (1 / rho)
168
- max_inv_rho = sigma_max ** (1 / rho)
169
- sigmas = torch.zeros_like(ramp)
170
- for i in range(n):
171
- sigmas[i] = (max_inv_rho + ramp[i] * (min_inv_rho - max_inv_rho)) ** (math.cos(i*math.tau/n)*2+rho)
172
- return torch.cat([sigmas, sigmas.new_zeros([1])])
173
-
174
- def get_sigmas_karras_exponential_decay(n, sigma_min, sigma_max, device='cpu'):
175
- """Constructs the noise schedule of Karras et al. (2022)."""
176
- rho = 7.
177
- ramp = torch.linspace(0, 1, n, device=device)
178
- min_inv_rho = sigma_min ** (1 / rho)
179
- max_inv_rho = sigma_max ** (1 / rho)
180
- sigmas = torch.zeros_like(ramp)
181
- for i in range(n):
182
- sigmas[i] = (max_inv_rho + ramp[i] * (min_inv_rho - max_inv_rho)) ** (rho-(3*i/n))
183
- return torch.cat([sigmas, sigmas.new_zeros([1])])
184
-
185
- def get_sigmas_karras_exponential_increment(n, sigma_min, sigma_max, device='cpu'):
186
- """Constructs the noise schedule of Karras et al. (2022)."""
187
- rho = 7.
188
- ramp = torch.linspace(0, 1, n, device=device)
189
- min_inv_rho = sigma_min ** (1 / rho)
190
- max_inv_rho = sigma_max ** (1 / rho)
191
- sigmas = torch.zeros_like(ramp)
192
- for i in range(n):
193
- sigmas[i] = (max_inv_rho + ramp[i] * (min_inv_rho - max_inv_rho)) ** (rho+3*i/n)
194
- return torch.cat([sigmas, sigmas.new_zeros([1])])
195
-
196
- def custom_scheduler(n, sigma_min, sigma_max, device):
197
- if 'import' in ExtraScheduler.customSigmas:
198
- sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
199
- elif 'eval' in ExtraScheduler.customSigmas:
200
- sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
201
- elif 'scripts' in ExtraScheduler.customSigmas:
202
- sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
203
-
204
- elif ExtraScheduler.customSigmas[0] == '[' and ExtraScheduler.customSigmas[-1] == ']':
205
- sigmasList = [float(x) for x in ExtraScheduler.customSigmas.strip('[]').split(',')]
206
-
207
- if sigmasList[0] == 1.0 and sigmasList[-1] == 0.0:
208
- for x in range(len(sigmasList)):
209
- sigmasList[x] *= (sigma_max - sigma_min)
210
- sigmasList[x] += sigma_min
211
- elif sigmasList[-1] == 0.0:
212
- #don't interpolate to number of steps, use as is
213
- return torch.tensor(sigmasList)
214
-
215
- xs = numpy.linspace(0, 1, len(sigmasList))
216
- ys = numpy.log(sigmasList[::-1])
217
-
218
- new_xs = numpy.linspace(0, 1, n)
219
- new_ys = numpy.interp(new_xs, xs, ys)
220
-
221
- interpolated_ys = numpy.exp(new_ys)[::-1].copy()
222
- sigmas = torch.tensor(interpolated_ys, device=device)
223
- else:
224
- sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
225
- detail = numpy.interp(numpy.linspace(0, 1, n), numpy.linspace(0, 1, 5), [1.0, 1.0, 1.25, 1.0, 1.0])
226
-
227
- phi = (1 + 5**0.5) / 2
228
- pi = math.pi
229
-
230
- s = 0
231
- while (s < n):
232
- x = (s) / (n - 1)
233
- M = sigma_max
234
- m = sigma_min
235
- d = detail[s]
236
-
237
- sigmas[s] = eval((ExtraScheduler.customSigmas))
238
- s += 1
239
- return torch.cat([sigmas, sigmas.new_zeros([1])])
240
-
241
- from scripts.simple_kes import get_sigmas_simple_kes
242
-
243
- from scripts.res_solver import sample_res_solver, sample_res_multistep, sample_res_multistep_cfgpp
244
- from scripts.clybius_dpmpp_4m_sde import sample_clyb_4m_sde_momentumized
245
- from scripts.gradient_estimation import sample_gradient_e, sample_gradient_e_cfgpp
246
- from scripts.seeds import sample_seeds_2, sample_seeds_3
247
-
248
- from modules import sd_samplers_common, sd_samplers
249
- from modules.sd_samplers_kdiffusion import sampler_extra_params, KDiffusionSampler
250
-
251
- class ExtraScheduler(scripts.Script):
252
- sorting_priority = 99
253
-
254
- installed = False
255
- customSigmas = 'm + (M-m)*(1-x)**3'
256
-
257
- def title(self):
258
- return "Extra Schedulers (custom)"
259
-
260
- def show(self, is_img2img):
261
- # make this extension visible in both txt2img and img2img tab.
262
- if ExtraScheduler.installed:
263
- return scripts.AlwaysVisible
264
- else:
265
- return False
266
-
267
- def ui(self, *args, **kwargs):
268
- #with gradio.Accordion(open=False, label=self.title(), visible=ExtraScheduler.installed):
269
- custom_sigmas = gradio.Textbox(value=ExtraScheduler.customSigmas, label='Extra Schedulers: custom function / list [n0, n1, n2, ...]', lines=1.01)
270
-
271
- self.infotext_fields = [
272
- (custom_sigmas, "es_custom"),
273
- ]
274
-
275
- return [custom_sigmas]
276
-
277
- def process(self, params, *script_args, **kwargs):
278
- if params.scheduler == 'custom':
279
- custom_sigmas = script_args[0]
280
- ExtraScheduler.customSigmas = custom_sigmas
281
- params.extra_generation_params.update(dict(es_custom = ExtraScheduler.customSigmas, ))
282
- elif params.scheduler == 'Simple KES':
283
- params.extra_generation_params.update(dict(
284
- es_KES_start_blend = getattr(shared.opts, 'kes_start_blend'),
285
- es_KES_end_blend = getattr(shared.opts, 'kes_end_blend'),
286
- es_KES_sharpness = getattr(shared.opts, 'kes_sharpness'),
287
- es_KES_initial_step_size = getattr(shared.opts, 'kes_initial_step_size'),
288
- es_KES_final_step_size = getattr(shared.opts, 'kes_final_step_size'),
289
- es_KES_initial_noise = getattr(shared.opts, 'kes_initial_noise'),
290
- es_KES_final_noise = getattr(shared.opts, 'kes_final_noise'),
291
- es_KES_smooth_blend = getattr(shared.opts, 'kes_smooth_blend'),
292
- es_KES_step_size_factor = getattr(shared.opts, 'kes_step_size_factor'),
293
- es_KES_noise_scale = getattr(shared.opts, 'kes_noise_scale'),
294
- ))
295
- return
296
-
297
- try:
298
- import modules.sd_schedulers as schedulers
299
-
300
- if True:
301
- # убираем уже зарегистрированные версии с тем же именем/лейблом
302
- def _drop(name=None, label=None):
303
- schedulers.schedulers = [
304
- s for s in getattr(schedulers, "schedulers", [])
305
- if (name is not None and getattr(s, "name", None) == name) is False
306
- and (label is not None and getattr(s, "label", None) == label) is False
307
- ]
308
-
309
- _drop(name="optimal_ss"); _drop(label="Optimal Steps")
310
- _drop(name="custom"); _drop(label="custom")
311
-
312
- print("Extension: Extra Schedulers: (re)adding schedulers")
313
-
314
- # далее — как у вас: создаём объекты Scheduler(...)
315
-
316
- print("Extension: Extra Schedulers: adding new schedulers")
317
- CosineScheduler = schedulers.Scheduler("cosine", "Cosine", cosine_scheduler)
318
- CosExpScheduler = schedulers.Scheduler("cosexp", "CosineExponential blend", cosexpblend_scheduler)
319
- CosExpBScheduler = schedulers.Scheduler("cosprev", "CosExp blend boost", cosexpblend_boost_scheduler)
320
- PhiScheduler = schedulers.Scheduler("phi", "Phi", phi_scheduler)
321
- VPScheduler = schedulers.Scheduler("vp", "VP", get_sigmas_vp)
322
- LaplaceScheduler = schedulers.Scheduler("laplace", "Laplace", get_sigmas_laplace)
323
-
324
- SineScheduler = schedulers.Scheduler("sine_sc", "Sine scaled", get_sigmas_sinusoidal_sf)
325
- InvCosScheduler = schedulers.Scheduler("inv_cos_sc", "Inverse Cosine scaled", get_sigmas_invcosinusoidal_sf)
326
- CosDynScheduler = schedulers.Scheduler("cosine_dyn", "Cosine Dynamic", get_sigmas_react_cosinusoidal_dynsf)
327
- KarrasDynScheduler = schedulers.Scheduler("karras_dyn", "Karras Dynamic", get_sigmas_karras_dynamic)
328
- KarrasExpDecayScheduler = schedulers.Scheduler("karras_exp_d", "Karras Exp Decay", get_sigmas_karras_exponential_decay)
329
- KarrasExpIncScheduler = schedulers.Scheduler("karras_exp_i", "Karras Exp Inc", get_sigmas_karras_exponential_increment)
330
-
331
- SimpleKEScheduler = schedulers.Scheduler("simple_kes", "Simple KES", get_sigmas_simple_kes)
332
- OSSFlowScheduler = schedulers.Scheduler("optimal_ss", "Optimal Steps", get_sigmas_oss)
333
- CustomScheduler = schedulers.Scheduler("custom", "custom", custom_scheduler)
334
-
335
- schedulers.schedulers.append(CosineScheduler)
336
- schedulers.schedulers.append(CosExpScheduler)
337
- schedulers.schedulers.append(CosExpBScheduler)
338
- schedulers.schedulers.append(PhiScheduler)
339
- schedulers.schedulers.append(VPScheduler)
340
- schedulers.schedulers.append(LaplaceScheduler)
341
- schedulers.schedulers.append(SineScheduler)
342
- schedulers.schedulers.append(InvCosScheduler)
343
- schedulers.schedulers.append(CosDynScheduler)
344
- schedulers.schedulers.append(KarrasDynScheduler)
345
- schedulers.schedulers.append(KarrasExpDecayScheduler)
346
- schedulers.schedulers.append(KarrasExpIncScheduler)
347
- schedulers.schedulers.append(SimpleKEScheduler)
348
- schedulers.schedulers.append(OSSFlowScheduler)
349
- schedulers.schedulers.append(CustomScheduler)
350
-
351
- schedulers.schedulers_map = {
352
- **{x.name: x for x in schedulers.schedulers},
353
- **{x.label: x for x in schedulers.schedulers}
354
- }
355
-
356
- # CFG++ method is Forge only, not working in A1111
357
- from modules import sd_samplers_common, sd_samplers
358
- from modules.sd_samplers_kdiffusion import sampler_extra_params, KDiffusionSampler
359
- from scripts.samplers_cfgpp import (
360
- sample_euler_ancestral_cfgpp, sample_euler_cfgpp, sample_euler_dy_cfgpp,
361
- sample_euler_smea_dy_cfgpp, sample_euler_negative_cfgpp, sample_euler_negative_dy_cfgpp
362
- )
363
- from scripts.forgeClassic_cfgpp import (
364
- sample_dpmpp_sde_cfgpp, sample_dpmpp_2m_cfgpp,
365
- sample_dpmpp_2m_sde_cfgpp, sample_dpmpp_3m_sde_cfgpp,
366
- sample_dpmpp_2s_ancestral_cfgpp
367
- )
368
-
369
- samplers_cfgpp = [
370
- ("Euler a CFG++", sample_euler_ancestral_cfgpp, ["k_euler_a_cfgpp"], {"uses_ensd": True}),
371
- ("Euler CFG++", sample_euler_cfgpp, ["k_euler_cfgpp"], {}),
372
- ("Euler Dy CFG++", sample_euler_dy_cfgpp, ["k_euler_dy_cfgpp"], {}),
373
- ("Euler SMEA Dy CFG++", sample_euler_smea_dy_cfgpp, ["k_euler_smea_dy_cfgpp"], {}),
374
- ("Euler Negative CFG++", sample_euler_negative_cfgpp, ["k_euler_negative_cfgpp"], {}),
375
- ("Euler Negative Dy CFG++", sample_euler_negative_dy_cfgpp, ["k_euler_negative_dy_cfgpp"], {}),
376
- ("RES multistep CFG++", sample_res_multistep_cfgpp, ["k_res_multi_cfgpp"], {}),
377
- ("Gradient Estimation CFG++", sample_gradient_e_cfgpp, ["k_grad_est_cfgpp"], {}),
378
- # ("GE/DPM2 CFG++", sample_ge_dpm2_cfgpp, ["k_ge_dpm_cfgpp"], {}),
379
- ("DPM++ SDE CFG++", sample_dpmpp_sde_cfgpp, ["k_dpmpp_sde_cfgpp"], {"brownian_noise": True, "second_order": True}),
380
- ("DPM++ 2M CFG++", sample_dpmpp_2m_cfgpp, ["k_dpmpp_2m_cfgpp"], {}),
381
- ("DPM++ 2M SDE CFG++", sample_dpmpp_2m_sde_cfgpp, ["k_dpmpp_2m_sde_cfgpp"], {"brownian_noise": True}),
382
- ("DPM++ 3M SDE CFG++", sample_dpmpp_3m_sde_cfgpp, ["k_dpmpp_3m_sde_cfgpp"], {"brownian_noise": True, 'discard_next_to_last_sigma': True}),
383
- ("DPM++ 2S a CFG++", sample_dpmpp_2s_ancestral_cfgpp,["k_dpmpp_2s_a_cfgpp"], {"uses_ensd": True, "second_order": True}),
384
- ]
385
-
386
- samplers_data_cfgpp = [
387
- sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
388
- for label, funcname, aliases, options in samplers_cfgpp
389
- if callable(funcname)
390
- ]
391
-
392
- sampler_extra_params['sample_euler_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']
393
- sampler_extra_params['sample_euler_negative_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']
394
- sampler_extra_params['sample_euler_dy_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']
395
- sampler_extra_params['sample_euler_negative_dy_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']
396
- sampler_extra_params['sample_euler_smea_dy_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']
397
-
398
- sampler_extra_params['sample_dpmpp_sde_cfgpp'] = ['s_noise']
399
- sampler_extra_params['sample_dpmpp_2m_sde_cfgpp'] = ['s_noise']
400
- sampler_extra_params['sample_dpmpp_3m_sde_cfgpp'] = ['s_noise']
401
- sampler_extra_params['sample_dpmpp_2s_ancestral_cfgpp']= ['s_noise']
402
-
403
- sd_samplers.all_samplers.extend(samplers_data_cfgpp)
404
- #except:
405
- #pass
406
-
407
- samplers_extra = [
408
- ("RES multistep", sample_res_multistep, ["k_res_multi"], {}),
409
- ("Refined Exponential Solver", sample_res_solver, ["k_res"], {}),
410
- ("DPM++ 4M SDE", sample_clyb_4m_sde_momentumized, ["k_dpmpp_4m_sde"], {}),
411
- ("Gradient Estimation", sample_gradient_e, ["k_grad_est"], {}),
412
- ("SEEDS-2", sample_seeds_2, ["k_seeds2"], {}),
413
- ("SEEDS-3", sample_seeds_3, ["k_seeds3"], {}),
414
- ]
415
- sampler_extra_params['sample_seeds_2'] = ['s_noise']
416
- sampler_extra_params['sample_seeds_3'] = ['s_noise']
417
-
418
- samplers_data_extra = [
419
- sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
420
- for label, funcname, aliases, options in samplers_extra
421
- if callable(funcname)
422
- ]
423
-
424
- sd_samplers.all_samplers.extend(samplers_data_extra)
425
- sd_samplers.all_samplers_map = {x.name: x for x in sd_samplers.all_samplers}
426
- sd_samplers.set_samplers()
427
-
428
-
429
- ExtraScheduler.installed = True
430
- except:
431
- print ("Extension: Extra Schedulers: unsupported webUI")
432
- ExtraScheduler.installed = False
 
 
 
 
 
 
 
 
 
 
1
+ import gradio
2
+ import math, numpy
3
+ import torch
4
+ from modules import scripts, shared
5
+
6
+ # Python 3.10+, PyTorch 2.1+, NumPy 1.24+
7
+ def get_sigmas_oss_improved(n, sigma_min, sigma_max, device):
8
+ """
9
+ OSS Improved:
10
+ - Сохраняет "умную" форму кривой (распределение шагов) от AYS/OSS.
11
+ - НО масштабирует её под ваши настройки sigma_min/sigma_max.
12
+ - Это позволяет использовать OSS с Turbo, LCM, Lightning и любыми другими моделями.
13
+ """
14
+ import numpy
15
+ import torch
16
+ from modules import shared
17
+
18
+ def loglinear_interp(values: list[float], num_steps: int) -> numpy.ndarray:
19
+ arr = numpy.asarray(values, dtype=float)
20
+ xs = numpy.linspace(0.0, 1.0, arr.shape[0])
21
+ ys = numpy.log(arr[::-1])
22
+ new_xs = numpy.linspace(0.0, 1.0, num_steps)
23
+ new_ys = numpy.interp(new_xs, xs, ys)
24
+ out = numpy.exp(new_ys)[::-1].copy()
25
+ return out
26
+
27
+ m = shared.sd_model
28
+
29
+ # 1. Выбираем базовый пресет (форму кривой)
30
+ if getattr(m, "is_sd3", False) or getattr(m, "is_flux", False):
31
+ base_sigmas = [0.9968, 0.9886, 0.9819, 0.975, 0.966, 0.9471, 0.9158, 0.8287, 0.5512, 0.2808, 0.001]
32
+ elif getattr(m, "is_sdxl", False):
33
+ base_sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029]
34
+ else: # SD1.5 / SD2.x
35
+ base_sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029]
36
+
37
+ # 2. Интерполируем под нужное количество шагов
38
+ if n != len(base_sigmas):
39
+ sigmas_np = loglinear_interp(base_sigmas, n)
40
+ else:
41
+ sigmas_np = numpy.asarray(base_sigmas, dtype=float)
42
+
43
+ # 3. УЛУЧШЕНИЕ: Масштабирование под границы пользователя
44
+ # Находим диапазон самого пресета
45
+ preset_max = sigmas_np[0]
46
+ preset_min = sigmas_np[-1]
47
+
48
+ # Нормализуем пресет в диапазон 0..1
49
+ # (sigmas_np - min) / (max - min)
50
+ sigmas_norm = (sigmas_np - preset_min) / (preset_max - preset_min)
51
+
52
+ # Растягиваем нормализованную кривую на диапазон пользователя (sigma_min...sigma_max)
53
+ # Формула: norm * (new_max - new_min) + new_min
54
+ sigmas_scaled = sigmas_norm * (sigma_max - sigma_min) + sigma_min
55
+
56
+ # 4. Добавляем терминальный ноль (стандарт для k-diffusion)
57
+ sigmas_final = numpy.append(sigmas_scaled, [0.0])
58
+
59
+ return torch.as_tensor(sigmas_final, dtype=torch.float32, device=device)
60
+
61
+
62
+
63
+ def cosine_scheduler (n, sigma_min, sigma_max, device):
64
+ sigmas = torch.zeros(n, device=device)
65
+ if n == 1:
66
+ sigmas[0] = sigma_max ** 0.5
67
+ else:
68
+ for x in range(n):
69
+ p = x / (n-1)
70
+ C = sigma_min + 0.5*(sigma_max-sigma_min)*(1 - math.cos(math.pi*(1 - p**0.5)))
71
+ sigmas[x] = C
72
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
73
+
74
+ def cosexpblend_boost_scheduler (n, sigma_min, sigma_max, device):
75
+ sigmas = []
76
+ if n == 1:
77
+ sigmas.append(sigma_max ** 0.5)
78
+ else:
79
+ detail = numpy.interp(numpy.linspace(0, 1, n), numpy.linspace(0, 1, 5), [1.0, 1.0, 1.27, 1.0, 1.0])
80
+
81
+ K = (sigma_min / sigma_max)**(1/(n-1))
82
+ E = sigma_max
83
+ for x in range(n):
84
+ p = x / (n-1)
85
+ C = sigma_min + 0.5*(sigma_max-sigma_min)*(1 - math.cos(math.pi*(1 - p**0.5)))
86
+ sigmas.append(detail[x] * (C + p * (E - C)))
87
+ E *= K
88
+
89
+ sigmas += [0.0]
90
+
91
+ return torch.FloatTensor(sigmas).to(device)
92
+
93
+ def cosexpblend_scheduler (n, sigma_min, sigma_max, device):
94
+ sigmas = []
95
+ if n == 1:
96
+ sigmas.append(sigma_max ** 0.5)
97
+ else:
98
+ K = (sigma_min / sigma_max)**(1/(n-1))
99
+ E = sigma_max
100
+ for x in range(n):
101
+ p = x / (n-1)
102
+ C = sigma_min + 0.5*(sigma_max-sigma_min)*(1 - math.cos(math.pi*(1 - p**0.5)))
103
+ sigmas.append(C + p * (E - C))
104
+ E *= K
105
+ sigmas += [0.0]
106
+ return torch.FloatTensor(sigmas).to(device)
107
+
108
+ ## phi scheduler modified from original by @extraltodeus
109
+ def phi_scheduler(n, sigma_min, sigma_max, device):
110
+ sigmas = torch.zeros(n, device=device)
111
+ if n == 1:
112
+ sigmas[0] = sigma_max ** 0.5
113
+ else:
114
+ phi = (1 + 5**0.5) / 2
115
+ for x in range(n):
116
+ sigmas[x] = sigma_min + (sigma_max-sigma_min)*((1-x/(n-1))**(phi*phi))
117
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
118
+
119
+ def get_sigmas_vp(n, sigma_min, sigma_max, device='cpu'):
120
+ """Constructs a continuous VP noise schedule."""
121
+
122
+ beta_d = 19.9
123
+ beta_min = 0.1
124
+ eps_s = 1e-3
125
+
126
+ t = torch.linspace(1, eps_s, n, device=device)
127
+ sigmas = torch.sqrt(torch.exp(beta_d * t ** 2 / 2 + beta_min * t) - 1)
128
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
129
+
130
+ def get_sigmas_laplace(n, sigma_min, sigma_max, device='cpu'):
131
+ """Constructs the noise schedule proposed by Tiankai et al. (2024). """
132
+ mu = 0.
133
+ beta = 0.5
134
+ epsilon = 1e-5 # avoid log(0)
135
+ x = torch.linspace(0, 1, n, device=device)
136
+ clamp = lambda x: torch.clamp(x, min=sigma_min, max=sigma_max)
137
+ lmb = mu - beta * torch.sign(0.5-x) * torch.log(1 - 2 * torch.abs(0.5-x) + epsilon)
138
+ sigmas = clamp(torch.exp(lmb))
139
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
140
+
141
+
142
+
143
+ def get_sigmas_sinusoidal_sf(n, sigma_min, sigma_max, device='cpu'):
144
+ """Constructs a sinusoidal noise schedule."""
145
+ sf = 3.5
146
+ x = torch.linspace(0, 1, n, device=device)
147
+ sigmas = (sigma_min + (sigma_max - sigma_min) * (1 - torch.sin(torch.pi / 2 * x)))/sigma_max
148
+ sigmas = sigmas**sf
149
+ sigmas = sigmas * sigma_max
150
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
151
+
152
+ def get_sigmas_invcosinusoidal_sf(n, sigma_min, sigma_max, device='cpu'):
153
+ """Constructs a sinusoidal noise schedule."""
154
+ sf = 3.5
155
+ x = torch.linspace(0, 1, n, device=device)
156
+ sigmas = (sigma_min + (sigma_max - sigma_min) * (0.5*(torch.cos(x * math.pi) + 1)))/sigma_max
157
+ sigmas = sigmas**sf
158
+ sigmas = sigmas * sigma_max
159
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
160
+
161
+ def get_sigmas_react_cosinusoidal_dynsf(n, sigma_min, sigma_max, device='cpu'):
162
+ """Constructs a sinusoidal noise schedule."""
163
+ sf = 2.15
164
+ x = torch.linspace(0, 1, n, device=device)
165
+ sigmas = (sigma_min+(sigma_max-sigma_min)*(torch.cos(x*(torch.pi/2))))/sigma_max
166
+ sigmas = sigmas**(sf*(n*x/n))
167
+ sigmas = sigmas * sigma_max
168
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
169
+
170
+ def get_sigmas_karras_dynamic(n, sigma_min, sigma_max, device='cpu'):
171
+ """Constructs the noise schedule of Karras et al. (2022)."""
172
+ rho = 7.
173
+ ramp = torch.linspace(0, 1, n, device=device)
174
+ min_inv_rho = sigma_min ** (1 / rho)
175
+ max_inv_rho = sigma_max ** (1 / rho)
176
+ sigmas = torch.zeros_like(ramp)
177
+ for i in range(n):
178
+ sigmas[i] = (max_inv_rho + ramp[i] * (min_inv_rho - max_inv_rho)) ** (math.cos(i*math.tau/n)*2+rho)
179
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
180
+
181
+ def get_sigmas_karras_exponential_decay(n, sigma_min, sigma_max, device='cpu'):
182
+ """Constructs the noise schedule of Karras et al. (2022)."""
183
+ rho = 7.
184
+ ramp = torch.linspace(0, 1, n, device=device)
185
+ min_inv_rho = sigma_min ** (1 / rho)
186
+ max_inv_rho = sigma_max ** (1 / rho)
187
+ sigmas = torch.zeros_like(ramp)
188
+ for i in range(n):
189
+ sigmas[i] = (max_inv_rho + ramp[i] * (min_inv_rho - max_inv_rho)) ** (rho-(3*i/n))
190
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
191
+
192
+ def get_sigmas_karras_exponential_increment(n, sigma_min, sigma_max, device='cpu'):
193
+ """Constructs the noise schedule of Karras et al. (2022)."""
194
+ rho = 7.
195
+ ramp = torch.linspace(0, 1, n, device=device)
196
+ min_inv_rho = sigma_min ** (1 / rho)
197
+ max_inv_rho = sigma_max ** (1 / rho)
198
+ sigmas = torch.zeros_like(ramp)
199
+ for i in range(n):
200
+ sigmas[i] = (max_inv_rho + ramp[i] * (min_inv_rho - max_inv_rho)) ** (rho+3*i/n)
201
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
202
+
203
+ def custom_scheduler(n, sigma_min, sigma_max, device):
204
+ if 'import' in ExtraScheduler.customSigmas:
205
+ sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
206
+ elif 'eval' in ExtraScheduler.customSigmas:
207
+ sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
208
+ elif 'scripts' in ExtraScheduler.customSigmas:
209
+ sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
210
+
211
+ elif ExtraScheduler.customSigmas[0] == '[' and ExtraScheduler.customSigmas[-1] == ']':
212
+ sigmasList = [float(x) for x in ExtraScheduler.customSigmas.strip('[]').split(',')]
213
+
214
+ if sigmasList[0] == 1.0 and sigmasList[-1] == 0.0:
215
+ for x in range(len(sigmasList)):
216
+ sigmasList[x] *= (sigma_max - sigma_min)
217
+ sigmasList[x] += sigma_min
218
+ elif sigmasList[-1] == 0.0:
219
+ #don't interpolate to number of steps, use as is
220
+ return torch.tensor(sigmasList)
221
+
222
+ xs = numpy.linspace(0, 1, len(sigmasList))
223
+ ys = numpy.log(sigmasList[::-1])
224
+
225
+ new_xs = numpy.linspace(0, 1, n)
226
+ new_ys = numpy.interp(new_xs, xs, ys)
227
+
228
+ interpolated_ys = numpy.exp(new_ys)[::-1].copy()
229
+ sigmas = torch.tensor(interpolated_ys, device=device)
230
+ else:
231
+ sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
232
+ detail = numpy.interp(numpy.linspace(0, 1, n), numpy.linspace(0, 1, 5), [1.0, 1.0, 1.25, 1.0, 1.0])
233
+
234
+ phi = (1 + 5**0.5) / 2
235
+ pi = math.pi
236
+
237
+ s = 0
238
+ while (s < n):
239
+ x = (s) / (n - 1)
240
+ M = sigma_max
241
+ m = sigma_min
242
+ d = detail[s]
243
+
244
+ sigmas[s] = eval((ExtraScheduler.customSigmas))
245
+ s += 1
246
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
247
+
248
+ from scripts.simple_kes import get_sigmas_simple_kes
249
+
250
+ from scripts.res_solver import sample_res_solver, sample_res_multistep, sample_res_multistep_cfgpp
251
+ from scripts.clybius_dpmpp_4m_sde import sample_clyb_4m_sde_momentumized
252
+ from scripts.gradient_estimation import sample_gradient_e, sample_gradient_e_cfgpp, sample_gradient_e_2s_cfgpp
253
+ from scripts.seeds import sample_seeds_2, sample_seeds_3
254
+
255
+ from modules import sd_samplers_common, sd_samplers
256
+ from modules.sd_samplers_kdiffusion import sampler_extra_params, KDiffusionSampler
257
+
258
class ExtraScheduler(scripts.Script):
    """UI/infotext glue for the extra schedulers registered at import time.

    Exposes a single textbox for the 'custom' scheduler expression and
    records scheduler-specific settings into the generation infotext.
    """

    sorting_priority = 99

    # Set by the module-level registration code: True only when the running
    # webUI accepted the extra schedulers/samplers.
    installed = False
    # Last custom sigma expression entered by the user; shared across tabs.
    customSigmas = 'm + (M-m)*(1-x)**3'

    def title(self):
        """Name shown in the webUI scripts list."""
        return "Extra Schedulers (custom)"

    def show(self, is_img2img):
        """Visible in both txt2img and img2img tabs, but only if installation succeeded."""
        return scripts.AlwaysVisible if ExtraScheduler.installed else False

    def ui(self, *args, **kwargs):
        """Build the single-textbox UI and wire it to the 'es_custom' infotext field."""
        #with gradio.Accordion(open=False, label=self.title(), visible=ExtraScheduler.installed):
        sigmas_box = gradio.Textbox(value=ExtraScheduler.customSigmas, label='Extra Schedulers: custom function / list [n0, n1, n2, ...]', lines=1.01)

        self.infotext_fields = [
            (sigmas_box, "es_custom"),
        ]

        return [sigmas_box]

    def process(self, params, *script_args, **kwargs):
        """Persist the custom expression / record KES settings into the infotext."""
        if params.scheduler == 'custom':
            ExtraScheduler.customSigmas = script_args[0]
            params.extra_generation_params.update({'es_custom': ExtraScheduler.customSigmas})
        elif params.scheduler == 'Simple KES':
            # Mirror every kes_* option into the infotext as es_KES_*.
            kes_options = (
                'start_blend', 'end_blend', 'sharpness',
                'initial_step_size', 'final_step_size',
                'initial_noise', 'final_noise',
                'smooth_blend', 'step_size_factor', 'noise_scale',
            )
            params.extra_generation_params.update({
                f'es_KES_{opt}': getattr(shared.opts, f'kes_{opt}')
                for opt in kes_options
            })
        return
303
+
304
try:
    import modules.sd_schedulers as schedulers

    # Drop any previously registered scheduler with the same name/label so
    # reloading this extension does not create duplicate entries.
    def _drop(name=None, label=None):
        schedulers.schedulers = [
            s for s in getattr(schedulers, "schedulers", [])
            if not (name is not None and getattr(s, "name", None) == name)
            and not (label is not None and getattr(s, "label", None) == label)
        ]

    _drop(name="optimal_ss"); _drop(label="Optimal Steps")
    _drop(name="custom"); _drop(label="custom")

    print("Extension: Extra Schedulers: (re)adding schedulers")

    print("Extension: Extra Schedulers: adding new schedulers")
    # Each Scheduler(name, label, fn) binds an internal name, a UI label and
    # the sigma-generating function defined earlier in this file.
    CosineScheduler = schedulers.Scheduler("cosine", "Cosine", cosine_scheduler)
    CosExpScheduler = schedulers.Scheduler("cosexp", "CosineExponential blend", cosexpblend_scheduler)
    CosExpBScheduler = schedulers.Scheduler("cosprev", "CosExp blend boost", cosexpblend_boost_scheduler)
    PhiScheduler = schedulers.Scheduler("phi", "Phi", phi_scheduler)
    VPScheduler = schedulers.Scheduler("vp", "VP", get_sigmas_vp)
    LaplaceScheduler = schedulers.Scheduler("laplace", "Laplace", get_sigmas_laplace)

    SineScheduler = schedulers.Scheduler("sine_sc", "Sine scaled", get_sigmas_sinusoidal_sf)
    InvCosScheduler = schedulers.Scheduler("inv_cos_sc", "Inverse Cosine scaled", get_sigmas_invcosinusoidal_sf)
    CosDynScheduler = schedulers.Scheduler("cosine_dyn", "Cosine Dynamic", get_sigmas_react_cosinusoidal_dynsf)
    KarrasDynScheduler = schedulers.Scheduler("karras_dyn", "Karras Dynamic", get_sigmas_karras_dynamic)
    KarrasExpDecayScheduler = schedulers.Scheduler("karras_exp_d", "Karras Exp Decay", get_sigmas_karras_exponential_decay)
    KarrasExpIncScheduler = schedulers.Scheduler("karras_exp_i", "Karras Exp Inc", get_sigmas_karras_exponential_increment)

    SimpleKEScheduler = schedulers.Scheduler("simple_kes", "Simple KES", get_sigmas_simple_kes)
    OSSFlowScheduler = schedulers.Scheduler("optimal_ss", "Optimal Steps", get_sigmas_oss)
    CustomScheduler = schedulers.Scheduler("custom", "custom", custom_scheduler)

    schedulers.schedulers.extend([
        CosineScheduler,
        CosExpScheduler,
        CosExpBScheduler,
        PhiScheduler,
        VPScheduler,
        LaplaceScheduler,
        SineScheduler,
        InvCosScheduler,
        CosDynScheduler,
        KarrasDynScheduler,
        KarrasExpDecayScheduler,
        KarrasExpIncScheduler,
        SimpleKEScheduler,
        OSSFlowScheduler,
        CustomScheduler,
    ])

    # The webUI looks schedulers up by both internal name and UI label.
    schedulers.schedulers_map = {
        **{x.name: x for x in schedulers.schedulers},
        **{x.label: x for x in schedulers.schedulers}
    }

    # CFG++ method is Forge only, not working in A1111
    from modules import sd_samplers_common, sd_samplers
    from modules.sd_samplers_kdiffusion import sampler_extra_params, KDiffusionSampler
    from scripts.samplers_cfgpp import (
        sample_euler_ancestral_cfgpp, sample_euler_cfgpp, sample_euler_dy_cfgpp,
        sample_euler_smea_dy_cfgpp, sample_euler_negative_cfgpp, sample_euler_negative_dy_cfgpp
    )
    from scripts.forgeClassic_cfgpp import (
        sample_dpmpp_sde_cfgpp, sample_dpmpp_2m_cfgpp,
        sample_dpmpp_2m_sde_cfgpp, sample_dpmpp_3m_sde_cfgpp,
        sample_dpmpp_2s_ancestral_cfgpp
    )

    # (label, sampler function, aliases, sampler options)
    samplers_cfgpp = [
        ("Euler a CFG++", sample_euler_ancestral_cfgpp, ["k_euler_a_cfgpp"], {"uses_ensd": True}),
        ("Euler CFG++", sample_euler_cfgpp, ["k_euler_cfgpp"], {}),
        ("Euler Dy CFG++", sample_euler_dy_cfgpp, ["k_euler_dy_cfgpp"], {}),
        ("Euler SMEA Dy CFG++", sample_euler_smea_dy_cfgpp, ["k_euler_smea_dy_cfgpp"], {}),
        ("Euler Negative CFG++", sample_euler_negative_cfgpp, ["k_euler_negative_cfgpp"], {}),
        ("Euler Negative Dy CFG++", sample_euler_negative_dy_cfgpp, ["k_euler_negative_dy_cfgpp"], {}),
        ("RES multistep CFG++", sample_res_multistep_cfgpp, ["k_res_multi_cfgpp"], {}),
        ("Gradient Estimation CFG++", sample_gradient_e_cfgpp, ["k_grad_est_cfgpp"], {}),
        ("Gradient Estimation 2S CFG++", sample_gradient_e_2s_cfgpp, ["k_ge2s_cfgpp"], {"second_order": True}),
        # ("GE/DPM2 CFG++", sample_ge_dpm2_cfgpp, ["k_ge_dpm_cfgpp"], {}),
        ("DPM++ SDE CFG++", sample_dpmpp_sde_cfgpp, ["k_dpmpp_sde_cfgpp"], {"brownian_noise": True, "second_order": True}),
        ("DPM++ 2M CFG++", sample_dpmpp_2m_cfgpp, ["k_dpmpp_2m_cfgpp"], {}),
        ("DPM++ 2M SDE CFG++", sample_dpmpp_2m_sde_cfgpp, ["k_dpmpp_2m_sde_cfgpp"], {"brownian_noise": True}),
        ("DPM++ 3M SDE CFG++", sample_dpmpp_3m_sde_cfgpp, ["k_dpmpp_3m_sde_cfgpp"], {"brownian_noise": True, 'discard_next_to_last_sigma': True}),
        ("DPM++ 2S a CFG++", sample_dpmpp_2s_ancestral_cfgpp, ["k_dpmpp_2s_a_cfgpp"], {"uses_ensd": True, "second_order": True}),
    ]

    # `funcname=funcname` freezes each function in its own lambda (avoids the
    # classic late-binding-closure bug).
    samplers_data_cfgpp = [
        sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
        for label, funcname, aliases, options in samplers_cfgpp
        if callable(funcname)
    ]

    # Per-sampler extra UI parameters: Euler variants take the full churn set,
    # the SDE-style samplers only take 's_noise'.
    for _fn in (
        'sample_euler_cfgpp',
        'sample_euler_negative_cfgpp',
        'sample_euler_dy_cfgpp',
        'sample_euler_negative_dy_cfgpp',
        'sample_euler_smea_dy_cfgpp',
    ):
        sampler_extra_params[_fn] = ['s_churn', 's_tmin', 's_tmax', 's_noise']

    for _fn in (
        'sample_dpmpp_sde_cfgpp',
        'sample_dpmpp_2m_sde_cfgpp',
        'sample_dpmpp_3m_sde_cfgpp',
        'sample_dpmpp_2s_ancestral_cfgpp',
        'sample_gradient_e_2s_cfgpp',
    ):
        sampler_extra_params[_fn] = ['s_noise']

    sd_samplers.all_samplers.extend(samplers_data_cfgpp)

    # Non-CFG++ extra samplers (functions defined earlier in this file).
    samplers_extra = [
        ("RES multistep", sample_res_multistep, ["k_res_multi"], {}),
        ("Refined Exponential Solver", sample_res_solver, ["k_res"], {}),
        ("DPM++ 4M SDE", sample_clyb_4m_sde_momentumized, ["k_dpmpp_4m_sde"], {}),
        ("Gradient Estimation", sample_gradient_e, ["k_grad_est"], {}),
        ("SEEDS-2", sample_seeds_2, ["k_seeds2"], {}),
        ("SEEDS-3", sample_seeds_3, ["k_seeds3"], {}),
    ]
    sampler_extra_params['sample_seeds_2'] = ['s_noise']
    sampler_extra_params['sample_seeds_3'] = ['s_noise']

    samplers_data_extra = [
        sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
        for label, funcname, aliases, options in samplers_extra
        if callable(funcname)
    ]

    sd_samplers.all_samplers.extend(samplers_data_extra)
    sd_samplers.all_samplers_map = {x.name: x for x in sd_samplers.all_samplers}
    sd_samplers.set_samplers()


    ExtraScheduler.installed = True
except Exception as e:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate, and the actual failure is reported instead of being
    # silently swallowed.
    print(f"Extension: Extra Schedulers: unsupported webUI ({e})")
    ExtraScheduler.installed = False