Update webUI_ExtraSchedulers/scripts/extra_schedulers.py
Browse files
webUI_ExtraSchedulers/scripts/extra_schedulers.py
CHANGED
|
@@ -1,432 +1,441 @@
|
|
| 1 |
-
import gradio
|
| 2 |
-
import math, numpy
|
| 3 |
-
import torch
|
| 4 |
-
from modules import scripts, shared
|
| 5 |
-
|
| 6 |
-
# Python 3.10+, PyTorch 2.1+, NumPy 1.24+
|
| 7 |
-
def
|
| 8 |
-
"""
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
base_sigmas = [
|
| 36 |
-
|
| 37 |
-
# 2
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
#
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
sigmas =
|
| 150 |
-
sigmas
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
sigmas =
|
| 159 |
-
sigmas
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
sigmas =
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
from
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
def
|
| 268 |
-
#
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
custom_sigmas
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
_drop(name=
|
| 310 |
-
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
schedulers.
|
| 336 |
-
schedulers.
|
| 337 |
-
|
| 338 |
-
schedulers.
|
| 339 |
-
schedulers.
|
| 340 |
-
schedulers.
|
| 341 |
-
|
| 342 |
-
schedulers.schedulers.append(
|
| 343 |
-
schedulers.schedulers.append(
|
| 344 |
-
schedulers.schedulers.append(
|
| 345 |
-
schedulers.schedulers.append(
|
| 346 |
-
schedulers.schedulers.append(
|
| 347 |
-
schedulers.schedulers.append(
|
| 348 |
-
schedulers.schedulers.append(
|
| 349 |
-
schedulers.schedulers.append(
|
| 350 |
-
|
| 351 |
-
schedulers.
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
("
|
| 378 |
-
|
| 379 |
-
("
|
| 380 |
-
("
|
| 381 |
-
("
|
| 382 |
-
("
|
| 383 |
-
("
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
|
| 393 |
-
|
| 394 |
-
|
| 395 |
-
|
| 396 |
-
|
| 397 |
-
|
| 398 |
-
|
| 399 |
-
|
| 400 |
-
sampler_extra_params['
|
| 401 |
-
sampler_extra_params['
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
| 408 |
-
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
|
| 417 |
-
|
| 418 |
-
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
| 424 |
-
|
| 425 |
-
|
| 426 |
-
|
| 427 |
-
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
| 432 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio
|
| 2 |
+
import math, numpy
|
| 3 |
+
import torch
|
| 4 |
+
from modules import scripts, shared
|
| 5 |
+
|
| 6 |
+
# Python 3.10+, PyTorch 2.1+, NumPy 1.24+
|
| 7 |
+
def get_sigmas_oss_improved(n, sigma_min, sigma_max, device):
    """OSS Improved noise schedule.

    Keeps the "smart" curve shape (step distribution) of the AYS/OSS presets,
    but rescales it onto the caller's [sigma_min, sigma_max] range so it can
    be used with Turbo, LCM, Lightning and other models.

    Parameters:
        n: number of sampling steps.
        sigma_min: lowest sigma of the rescaled schedule.
        sigma_max: highest sigma of the rescaled schedule.
        device: torch device for the returned tensor.

    Returns:
        1-D float32 tensor of n sigmas plus a trailing 0.0 terminator.
    """
    import numpy
    import torch
    from modules import shared

    def loglinear_interp(values: list[float], num_steps: int) -> numpy.ndarray:
        # Interpolate in log-space so the characteristic curve shape survives
        # resampling to a different step count.
        arr = numpy.asarray(values, dtype=float)
        xs = numpy.linspace(0.0, 1.0, arr.shape[0])
        ys = numpy.log(arr[::-1])
        new_xs = numpy.linspace(0.0, 1.0, num_steps)
        new_ys = numpy.interp(new_xs, xs, ys)
        return numpy.exp(new_ys)[::-1].copy()

    m = shared.sd_model

    # 1. Pick the base preset (curve shape) for the loaded model family.
    if getattr(m, "is_sd3", False) or getattr(m, "is_flux", False):
        base_sigmas = [0.9968, 0.9886, 0.9819, 0.975, 0.966, 0.9471, 0.9158, 0.8287, 0.5512, 0.2808, 0.001]
    elif getattr(m, "is_sdxl", False):
        base_sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029]
    else:  # SD1.5 / SD2.x
        base_sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029]

    # 2. Interpolate the preset to the requested number of steps.
    if n != len(base_sigmas):
        sigmas_np = loglinear_interp(base_sigmas, n)
    else:
        sigmas_np = numpy.asarray(base_sigmas, dtype=float)

    # 3. Rescale the curve onto the user's [sigma_min, sigma_max] range.
    preset_max = sigmas_np[0]
    preset_min = sigmas_np[-1]

    if preset_max == preset_min:
        # BUG FIX: degenerate range (happens for n == 1, where interpolation
        # yields a single value) previously divided by zero and produced NaN.
        # Fall back to the single highest sigma instead.
        sigmas_scaled = numpy.full_like(sigmas_np, sigma_max)
    else:
        # Normalize the preset to 0..1, then stretch onto the user's range:
        # norm * (new_max - new_min) + new_min
        sigmas_norm = (sigmas_np - preset_min) / (preset_max - preset_min)
        sigmas_scaled = sigmas_norm * (sigma_max - sigma_min) + sigma_min

    # 4. Append the terminal zero expected by k-diffusion samplers.
    sigmas_final = numpy.append(sigmas_scaled, [0.0])

    return torch.as_tensor(sigmas_final, dtype=torch.float32, device=device)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def cosine_scheduler (n, sigma_min, sigma_max, device):
    """Cosine-shaped sigma schedule with a trailing zero terminator."""
    if n == 1:
        values = [sigma_max ** 0.5]
    else:
        span = sigma_max - sigma_min
        # sqrt eases the early steps; the cosine smooths both ends of the curve.
        values = [
            sigma_min + 0.5 * span * (1.0 - math.cos(math.pi * (1.0 - (step / (n - 1)) ** 0.5)))
            for step in range(n)
        ]
    values.append(0.0)
    return torch.tensor(values, dtype=torch.float32, device=device)
|
| 73 |
+
|
| 74 |
+
def cosexpblend_boost_scheduler (n, sigma_min, sigma_max, device):
    """Cosine/exponential blended schedule with a mid-schedule detail boost."""
    out = []
    if n == 1:
        out.append(sigma_max ** 0.5)
    else:
        # Boost factor peaking at 1.27 in the middle of the schedule, 1.0 at both ends.
        boost = numpy.interp(numpy.linspace(0, 1, n), numpy.linspace(0, 1, 5), [1.0, 1.0, 1.27, 1.0, 1.0])
        decay = (sigma_min / sigma_max) ** (1 / (n - 1))
        exp_sigma = sigma_max
        last = n - 1
        for step in range(n):
            progress = step / last
            cosine = sigma_min + 0.5 * (sigma_max - sigma_min) * (1 - math.cos(math.pi * (1 - progress ** 0.5)))
            # Weight shifts from pure cosine (start) to pure exponential (end).
            blended = cosine + progress * (exp_sigma - cosine)
            out.append(boost[step] * blended)
            exp_sigma *= decay
    out.append(0.0)
    return torch.FloatTensor(out).to(device)
|
| 92 |
+
|
| 93 |
+
def cosexpblend_scheduler (n, sigma_min, sigma_max, device):
    """Blend of a cosine schedule and an exponential schedule, plus terminal zero."""
    if n == 1:
        out = [sigma_max ** 0.5]
    else:
        ratio = (sigma_min / sigma_max) ** (1 / (n - 1))
        out = []
        exp_sigma = sigma_max
        for step in range(n):
            progress = step / (n - 1)
            cosine = sigma_min + 0.5 * (sigma_max - sigma_min) * (1 - math.cos(math.pi * (1 - progress ** 0.5)))
            # Weight shifts from pure cosine (start) to pure exponential (end).
            out.append(cosine + progress * (exp_sigma - cosine))
            exp_sigma *= ratio
    out.append(0.0)
    return torch.FloatTensor(out).to(device)
|
| 107 |
+
|
| 108 |
+
## phi scheduler modified from original by @extraltodeus
|
| 109 |
+
def phi_scheduler(n, sigma_min, sigma_max, device):
    """Golden-ratio power-law schedule (exponent phi^2), with terminal zero."""
    if n == 1:
        values = [sigma_max ** 0.5]
    else:
        golden = (1 + 5 ** 0.5) / 2
        exponent = golden * golden
        span = sigma_max - sigma_min
        values = [sigma_min + span * ((1 - i / (n - 1)) ** exponent) for i in range(n)]
    values.append(0.0)
    return torch.tensor(values, dtype=torch.float32, device=device)
|
| 118 |
+
|
| 119 |
+
def get_sigmas_vp(n, sigma_min, sigma_max, device='cpu'):
    """Continuous VP (variance preserving) noise schedule.

    sigma_min / sigma_max are accepted for interface compatibility, but the VP
    schedule is fully determined by its beta parameters.
    """
    beta_d, beta_min, eps_s = 19.9, 0.1, 1e-3
    timesteps = torch.linspace(1, eps_s, n, device=device)
    variance = torch.exp(beta_d * timesteps ** 2 / 2 + beta_min * timesteps) - 1
    sigmas = variance.sqrt()
    return torch.cat([sigmas, sigmas.new_zeros([1])])
|
| 129 |
+
|
| 130 |
+
def get_sigmas_laplace(n, sigma_min, sigma_max, device='cpu'):
    """Laplace noise schedule (Tiankai et al., 2024), clamped to [sigma_min, sigma_max]."""
    mu, beta = 0.0, 0.5
    eps = 1e-5  # keeps the log argument strictly positive at the extremes
    grid = torch.linspace(0, 1, n, device=device)
    offset = 0.5 - grid
    # Sign flips at the midpoint: large sigmas at the start, small at the end.
    log_sigma = mu - beta * torch.sign(offset) * torch.log(1 - 2 * offset.abs() + eps)
    sigmas = torch.exp(log_sigma).clamp(min=sigma_min, max=sigma_max)
    return torch.cat([sigmas, sigmas.new_zeros([1])])
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def get_sigmas_sinusoidal_sf(n, sigma_min, sigma_max, device='cpu'):
    """Sinusoidal noise schedule sharpened by a fixed power factor."""
    power = 3.5  # sharpening exponent applied to the normalized curve
    t = torch.linspace(0, 1, n, device=device)
    ramp = 1 - torch.sin(torch.pi / 2 * t)  # runs 1 -> 0 over the schedule
    normalized = (sigma_min + (sigma_max - sigma_min) * ramp) / sigma_max
    sigmas = sigma_max * normalized ** power
    return torch.cat([sigmas, sigmas.new_zeros([1])])
|
| 151 |
+
|
| 152 |
+
def get_sigmas_invcosinusoidal_sf(n, sigma_min, sigma_max, device='cpu'):
    """Cosine half-wave noise schedule sharpened by a fixed power factor."""
    power = 3.5  # sharpening exponent applied to the normalized curve
    t = torch.linspace(0, 1, n, device=device)
    ramp = 0.5 * (torch.cos(t * math.pi) + 1)  # runs 1 -> 0 over the schedule
    normalized = (sigma_min + (sigma_max - sigma_min) * ramp) / sigma_max
    sigmas = sigma_max * normalized ** power
    return torch.cat([sigmas, sigmas.new_zeros([1])])
|
| 160 |
+
|
| 161 |
+
def get_sigmas_react_cosinusoidal_dynsf(n, sigma_min, sigma_max, device='cpu'):
    """Cosinusoidal schedule whose sharpening exponent grows along the schedule."""
    base_power = 2.15
    t = torch.linspace(0, 1, n, device=device)
    normalized = (sigma_min + (sigma_max - sigma_min) * torch.cos(t * (torch.pi / 2))) / sigma_max
    # Exponent ramps from 0 at the first step up to base_power at the last
    # (the n*t/n term reduces to t; kept as-is to preserve exact arithmetic).
    sigmas = sigma_max * normalized ** (base_power * (n * t / n))
    return torch.cat([sigmas, sigmas.new_zeros([1])])
|
| 169 |
+
|
| 170 |
+
def get_sigmas_karras_dynamic(n, sigma_min, sigma_max, device='cpu'):
    """Karras (2022) schedule whose rho exponent oscillates with a cosine."""
    rho = 7.
    ramp = torch.linspace(0, 1, n, device=device)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = torch.zeros_like(ramp)
    for step, t in enumerate(ramp):
        base = max_inv_rho + t * (min_inv_rho - max_inv_rho)
        # Exponent swings +/-2 around rho over one full cosine period.
        sigmas[step] = base ** (2 * math.cos(step * math.tau / n) + rho)
    return torch.cat([sigmas, sigmas.new_zeros([1])])
|
| 180 |
+
|
| 181 |
+
def get_sigmas_karras_exponential_decay(n, sigma_min, sigma_max, device='cpu'):
    """Karras (2022) schedule whose rho exponent decays linearly over the steps."""
    rho = 7.
    ramp = torch.linspace(0, 1, n, device=device)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = torch.zeros_like(ramp)
    for step, t in enumerate(ramp):
        base = max_inv_rho + t * (min_inv_rho - max_inv_rho)
        # Exponent shrinks from rho toward rho - 3 as the schedule advances.
        sigmas[step] = base ** (rho - (3 * step / n))
    return torch.cat([sigmas, sigmas.new_zeros([1])])
|
| 191 |
+
|
| 192 |
+
def get_sigmas_karras_exponential_increment(n, sigma_min, sigma_max, device='cpu'):
    """Karras (2022) schedule whose rho exponent grows linearly over the steps."""
    rho = 7.
    ramp = torch.linspace(0, 1, n, device=device)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = torch.zeros_like(ramp)
    for step, t in enumerate(ramp):
        base = max_inv_rho + t * (min_inv_rho - max_inv_rho)
        # Exponent grows from rho toward rho + 3 as the schedule advances.
        sigmas[step] = base ** (rho + 3 * step / n)
    return torch.cat([sigmas, sigmas.new_zeros([1])])
|
| 202 |
+
|
| 203 |
+
def custom_scheduler(n, sigma_min, sigma_max, device):
    """Build a sigma schedule from the user string in ExtraScheduler.customSigmas.

    Recognized forms:
      * a bracketed list "[a, b, c, ...]" of explicit sigma values;
      * otherwise a Python expression evaluated once per step with the
        variables x (progress 0..1), m (sigma_min), M (sigma_max),
        d (detail boost), phi and pi in scope;
      * strings containing 'import', 'eval' or 'scripts' are rejected and
        fall back to a plain linear schedule (crude blocklist; see the
        security note at the eval() call below).
    Returns n sigmas plus a trailing zero, except for the raw-list early
    return which is used as-is.
    """
    # Crude substring blocklist guarding the eval() below; each hit falls back
    # to a simple linear schedule instead of evaluating the string.
    if 'import' in ExtraScheduler.customSigmas:
        sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
    elif 'eval' in ExtraScheduler.customSigmas:
        sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
    elif 'scripts' in ExtraScheduler.customSigmas:
        sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)

    elif ExtraScheduler.customSigmas[0] == '[' and ExtraScheduler.customSigmas[-1] == ']':
        # Explicit list of sigma values.
        sigmasList = [float(x) for x in ExtraScheduler.customSigmas.strip('[]').split(',')]

        if sigmasList[0] == 1.0 and sigmasList[-1] == 0.0:
            # Normalized 1..0 list: rescale into [sigma_min, sigma_max].
            for x in range(len(sigmasList)):
                sigmasList[x] *= (sigma_max - sigma_min)
                sigmasList[x] += sigma_min
        elif sigmasList[-1] == 0.0:
            #don't interpolate to number of steps, use as is
            # NOTE(review): this branch returns a tensor on the default device,
            # unlike every other path which honours `device` — confirm intended.
            return torch.tensor(sigmasList)

        # Log-linear interpolation of the list to exactly n steps.
        xs = numpy.linspace(0, 1, len(sigmasList))
        ys = numpy.log(sigmasList[::-1])

        new_xs = numpy.linspace(0, 1, n)
        new_ys = numpy.interp(new_xs, xs, ys)

        interpolated_ys = numpy.exp(new_ys)[::-1].copy()
        sigmas = torch.tensor(interpolated_ys, device=device)
    else:
        sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
        # Mid-schedule detail boost exposed to the expression as `d`.
        detail = numpy.interp(numpy.linspace(0, 1, n), numpy.linspace(0, 1, 5), [1.0, 1.0, 1.25, 1.0, 1.0])

        phi = (1 + 5**0.5) / 2
        pi = math.pi

        s = 0
        while (s < n):
            x = (s) / (n - 1)
            M = sigma_max
            m = sigma_min
            d = detail[s]

            # SECURITY: eval() of user-entered text. The substring blocklist
            # above is not a sandbox — arbitrary expressions still execute.
            sigmas[s] = eval((ExtraScheduler.customSigmas))
            s += 1
    return torch.cat([sigmas, sigmas.new_zeros([1])])
|
| 247 |
+
|
| 248 |
+
from scripts.simple_kes import get_sigmas_simple_kes
|
| 249 |
+
|
| 250 |
+
from scripts.res_solver import sample_res_solver, sample_res_multistep, sample_res_multistep_cfgpp
|
| 251 |
+
from scripts.clybius_dpmpp_4m_sde import sample_clyb_4m_sde_momentumized
|
| 252 |
+
from scripts.gradient_estimation import sample_gradient_e, sample_gradient_e_cfgpp, sample_gradient_e_2s_cfgpp
|
| 253 |
+
from scripts.seeds import sample_seeds_2, sample_seeds_3
|
| 254 |
+
|
| 255 |
+
from modules import sd_samplers_common, sd_samplers
|
| 256 |
+
from modules.sd_samplers_kdiffusion import sampler_extra_params, KDiffusionSampler
|
| 257 |
+
|
| 258 |
+
class ExtraScheduler(scripts.Script):
    """UI and infotext glue for the extra schedulers/samplers registered at import time."""

    # High value — assumed to sort this script after most others; confirm
    # against the webUI's sorting_priority semantics.
    sorting_priority = 99

    # Set to True by the module-level registration code on success.
    installed = False
    # User-editable expression or sigma list consumed by custom_scheduler().
    customSigmas = 'm + (M-m)*(1-x)**3'

    def title(self):
        return "Extra Schedulers (custom)"

    def show(self, is_img2img):
        # make this extension visible in both txt2img and img2img tab.
        if ExtraScheduler.installed:
            return scripts.AlwaysVisible
        else:
            return False

    def ui(self, *args, **kwargs):
        #with gradio.Accordion(open=False, label=self.title(), visible=ExtraScheduler.installed):
        custom_sigmas = gradio.Textbox(value=ExtraScheduler.customSigmas, label='Extra Schedulers: custom function / list [n0, n1, n2, ...]', lines=1.01)

        # Lets the webUI restore the textbox from the "es_custom" infotext key.
        self.infotext_fields = [
            (custom_sigmas, "es_custom"),
        ]

        return [custom_sigmas]

    def process(self, params, *script_args, **kwargs):
        """Record scheduler-specific settings into the generation parameters."""
        if params.scheduler == 'custom':
            # Persist the user's expression for custom_scheduler() and infotext.
            custom_sigmas = script_args[0]
            ExtraScheduler.customSigmas = custom_sigmas
            params.extra_generation_params.update(dict(es_custom = ExtraScheduler.customSigmas, ))
        elif params.scheduler == 'Simple KES':
            # Mirror the KES options into infotext so runs are reproducible.
            # NOTE(review): getattr without a default raises AttributeError if
            # these options are not registered (presumably by the simple_kes
            # script) — confirm.
            params.extra_generation_params.update(dict(
                es_KES_start_blend = getattr(shared.opts, 'kes_start_blend'),
                es_KES_end_blend = getattr(shared.opts, 'kes_end_blend'),
                es_KES_sharpness = getattr(shared.opts, 'kes_sharpness'),
                es_KES_initial_step_size = getattr(shared.opts, 'kes_initial_step_size'),
                es_KES_final_step_size = getattr(shared.opts, 'kes_final_step_size'),
                es_KES_initial_noise = getattr(shared.opts, 'kes_initial_noise'),
                es_KES_final_noise = getattr(shared.opts, 'kes_final_noise'),
                es_KES_smooth_blend = getattr(shared.opts, 'kes_smooth_blend'),
                es_KES_step_size_factor = getattr(shared.opts, 'kes_step_size_factor'),
                es_KES_noise_scale = getattr(shared.opts, 'kes_noise_scale'),
            ))
        return
|
| 303 |
+
|
| 304 |
+
# Module-level registration: runs once at import and wires the extra
# schedulers and samplers into the webUI's registries.
try:
    import modules.sd_schedulers as schedulers

    # Remove any already-registered schedulers with the same name/label so a
    # module reload does not register duplicates.
    def _drop(name=None, label=None):
        schedulers.schedulers = [
            s for s in getattr(schedulers, "schedulers", [])
            if not (name is not None and getattr(s, "name", None) == name)
            and not (label is not None and getattr(s, "label", None) == label)
        ]

    _drop(name="optimal_ss"); _drop(label="Optimal Steps")
    _drop(name="custom"); _drop(label="custom")

    print("Extension: Extra Schedulers: (re)adding schedulers")

    print("Extension: Extra Schedulers: adding new schedulers")
    CosineScheduler = schedulers.Scheduler("cosine", "Cosine", cosine_scheduler)
    CosExpScheduler = schedulers.Scheduler("cosexp", "CosineExponential blend", cosexpblend_scheduler)
    CosExpBScheduler = schedulers.Scheduler("cosprev", "CosExp blend boost", cosexpblend_boost_scheduler)
    PhiScheduler = schedulers.Scheduler("phi", "Phi", phi_scheduler)
    VPScheduler = schedulers.Scheduler("vp", "VP", get_sigmas_vp)
    LaplaceScheduler = schedulers.Scheduler("laplace", "Laplace", get_sigmas_laplace)

    SineScheduler = schedulers.Scheduler("sine_sc", "Sine scaled", get_sigmas_sinusoidal_sf)
    InvCosScheduler = schedulers.Scheduler("inv_cos_sc", "Inverse Cosine scaled", get_sigmas_invcosinusoidal_sf)
    CosDynScheduler = schedulers.Scheduler("cosine_dyn", "Cosine Dynamic", get_sigmas_react_cosinusoidal_dynsf)
    KarrasDynScheduler = schedulers.Scheduler("karras_dyn", "Karras Dynamic", get_sigmas_karras_dynamic)
    KarrasExpDecayScheduler = schedulers.Scheduler("karras_exp_d", "Karras Exp Decay", get_sigmas_karras_exponential_decay)
    KarrasExpIncScheduler = schedulers.Scheduler("karras_exp_i", "Karras Exp Inc", get_sigmas_karras_exponential_increment)

    SimpleKEScheduler = schedulers.Scheduler("simple_kes", "Simple KES", get_sigmas_simple_kes)
    # BUG FIX: previously referenced the undefined name `get_sigmas_oss`
    # (the function defined above is get_sigmas_oss_improved). The resulting
    # NameError was silently swallowed by the bare `except:` below, disabling
    # the whole extension.
    OSSFlowScheduler = schedulers.Scheduler("optimal_ss", "Optimal Steps", get_sigmas_oss_improved)
    CustomScheduler = schedulers.Scheduler("custom", "custom", custom_scheduler)

    schedulers.schedulers.extend([
        CosineScheduler, CosExpScheduler, CosExpBScheduler, PhiScheduler,
        VPScheduler, LaplaceScheduler, SineScheduler, InvCosScheduler,
        CosDynScheduler, KarrasDynScheduler, KarrasExpDecayScheduler,
        KarrasExpIncScheduler, SimpleKEScheduler, OSSFlowScheduler,
        CustomScheduler,
    ])

    # Rebuild the lookup map so schedulers resolve by both name and label.
    schedulers.schedulers_map = {
        **{x.name: x for x in schedulers.schedulers},
        **{x.label: x for x in schedulers.schedulers}
    }

    # CFG++ method is Forge only, not working in A1111
    from modules import sd_samplers_common, sd_samplers
    from modules.sd_samplers_kdiffusion import sampler_extra_params, KDiffusionSampler
    from scripts.samplers_cfgpp import (
        sample_euler_ancestral_cfgpp, sample_euler_cfgpp, sample_euler_dy_cfgpp,
        sample_euler_smea_dy_cfgpp, sample_euler_negative_cfgpp, sample_euler_negative_dy_cfgpp
    )
    from scripts.forgeClassic_cfgpp import (
        sample_dpmpp_sde_cfgpp, sample_dpmpp_2m_cfgpp,
        sample_dpmpp_2m_sde_cfgpp, sample_dpmpp_3m_sde_cfgpp,
        sample_dpmpp_2s_ancestral_cfgpp
    )

    samplers_cfgpp = [
        ("Euler a CFG++", sample_euler_ancestral_cfgpp, ["k_euler_a_cfgpp"], {"uses_ensd": True}),
        ("Euler CFG++", sample_euler_cfgpp, ["k_euler_cfgpp"], {}),
        ("Euler Dy CFG++", sample_euler_dy_cfgpp, ["k_euler_dy_cfgpp"], {}),
        ("Euler SMEA Dy CFG++", sample_euler_smea_dy_cfgpp, ["k_euler_smea_dy_cfgpp"], {}),
        ("Euler Negative CFG++", sample_euler_negative_cfgpp, ["k_euler_negative_cfgpp"], {}),
        ("Euler Negative Dy CFG++", sample_euler_negative_dy_cfgpp, ["k_euler_negative_dy_cfgpp"], {}),
        ("RES multistep CFG++", sample_res_multistep_cfgpp, ["k_res_multi_cfgpp"], {}),
        ("Gradient Estimation CFG++", sample_gradient_e_cfgpp, ["k_grad_est_cfgpp"], {}),
        ("Gradient Estimation 2S CFG++", sample_gradient_e_2s_cfgpp, ["k_ge2s_cfgpp"], {"second_order": True}),
        ("DPM++ SDE CFG++", sample_dpmpp_sde_cfgpp, ["k_dpmpp_sde_cfgpp"], {"brownian_noise": True, "second_order": True}),
        ("DPM++ 2M CFG++", sample_dpmpp_2m_cfgpp, ["k_dpmpp_2m_cfgpp"], {}),
        ("DPM++ 2M SDE CFG++", sample_dpmpp_2m_sde_cfgpp, ["k_dpmpp_2m_sde_cfgpp"], {"brownian_noise": True}),
        ("DPM++ 3M SDE CFG++", sample_dpmpp_3m_sde_cfgpp, ["k_dpmpp_3m_sde_cfgpp"], {"brownian_noise": True, 'discard_next_to_last_sigma': True}),
        ("DPM++ 2S a CFG++", sample_dpmpp_2s_ancestral_cfgpp, ["k_dpmpp_2s_a_cfgpp"], {"uses_ensd": True, "second_order": True}),
    ]

    samplers_data_cfgpp = [
        sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
        for label, funcname, aliases, options in samplers_cfgpp
        if callable(funcname)
    ]

    sampler_extra_params['sample_euler_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']
    sampler_extra_params['sample_euler_negative_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']
    sampler_extra_params['sample_euler_dy_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']
    sampler_extra_params['sample_euler_negative_dy_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']
    sampler_extra_params['sample_euler_smea_dy_cfgpp'] = ['s_churn', 's_tmin', 's_tmax', 's_noise']

    sampler_extra_params['sample_dpmpp_sde_cfgpp'] = ['s_noise']
    sampler_extra_params['sample_dpmpp_2m_sde_cfgpp'] = ['s_noise']
    sampler_extra_params['sample_dpmpp_3m_sde_cfgpp'] = ['s_noise']
    sampler_extra_params['sample_dpmpp_2s_ancestral_cfgpp'] = ['s_noise']
    sampler_extra_params['sample_gradient_e_2s_cfgpp'] = ['s_noise']

    sd_samplers.all_samplers.extend(samplers_data_cfgpp)

    samplers_extra = [
        ("RES multistep", sample_res_multistep, ["k_res_multi"], {}),
        ("Refined Exponential Solver", sample_res_solver, ["k_res"], {}),
        ("DPM++ 4M SDE", sample_clyb_4m_sde_momentumized, ["k_dpmpp_4m_sde"], {}),
        ("Gradient Estimation", sample_gradient_e, ["k_grad_est"], {}),
        ("SEEDS-2", sample_seeds_2, ["k_seeds2"], {}),
        ("SEEDS-3", sample_seeds_3, ["k_seeds3"], {}),
    ]
    sampler_extra_params['sample_seeds_2'] = ['s_noise']
    sampler_extra_params['sample_seeds_3'] = ['s_noise']

    samplers_data_extra = [
        sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
        for label, funcname, aliases, options in samplers_extra
        if callable(funcname)
    ]

    sd_samplers.all_samplers.extend(samplers_data_extra)
    sd_samplers.all_samplers_map = {x.name: x for x in sd_samplers.all_samplers}
    sd_samplers.set_samplers()

    ExtraScheduler.installed = True
except Exception as e:
    # BUG FIX: a bare `except:` previously swallowed every error (including
    # KeyboardInterrupt) without reporting the cause, which hid real defects
    # behind the generic message below. Report the actual failure instead.
    print("Extension: Extra Schedulers: unsupported webUI:", e)
    ExtraScheduler.installed = False
|