dikdimon commited on
Commit
079899a
·
verified ·
1 Parent(s): cb099ef

Upload sd_simple_kes_v2 using SD-Hub

Browse files
sd_simple_kes_v2/__pycache__/get_sigmas.cpython-310.pyc ADDED
Binary file (1.26 kB). View file
 
sd_simple_kes_v2/__pycache__/plot_sigma_sequence.cpython-310.pyc ADDED
Binary file (1.69 kB). View file
 
sd_simple_kes_v2/__pycache__/simple_kes_v2.cpython-310.pyc ADDED
Binary file (24.4 kB). View file
 
sd_simple_kes_v2/__pycache__/validate_config.cpython-310.pyc ADDED
Binary file (2.39 kB). View file
 
sd_simple_kes_v2/get_sigmas.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import torch
3
+ from torch import linspace, tensor
4
+
5
+ # Source files are from the diffusers library; this file was modified to suppress the console warning shown below:
6
+ '''
7
+ UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
8
+ min_inv_rho = tensor(sigma_min, device=device) ** (1 / rho)
9
+
10
+ '''
11
def append_zero(x):
    """Return *x* with a single terminal zero concatenated (used as the final sigma)."""
    terminal = x.new_zeros([1])
    return torch.cat((x, terminal))
13
+
14
def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
    """Constructs the noise schedule of Karras et al. (2022).

    Returns ``n`` sigmas descending from ``sigma_max`` to ``sigma_min``, with a
    trailing zero appended by ``append_zero`` (``n + 1`` values in total).

    Parameters:
    - n: number of inference steps.
    - sigma_min / sigma_max: schedule endpoints; floats or tensors.
    - rho: curvature exponent of the schedule (7.0 is the paper's default).
    - device: device for the returned tensor.
    """
    ramp = linspace(0, 1, n, device=device)

    def _to_tensor(val, device):
        # Move existing tensors instead of re-wrapping them in torch.tensor(),
        # which avoids the "copy construct from a tensor" UserWarning noted above.
        return val.to(device) if isinstance(val, torch.Tensor) else torch.tensor(val, device=device)

    min_inv_rho = _to_tensor(sigma_min, device) ** (1 / rho)
    max_inv_rho = _to_tensor(sigma_max, device) ** (1 / rho)
    # Interpolate linearly in sigma^(1/rho) space, then map back.
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    return append_zero(sigmas).to(device)
26
+
27
+
28
+
29
def get_sigmas_exponential(n, sigma_min, sigma_max, device='cpu'):
    """Constructs an exponential noise schedule (log-linear from sigma_max down to sigma_min)."""
    log_ramp = torch.linspace(math.log(sigma_max), math.log(sigma_min), n, device=device)
    sigmas = log_ramp.exp()
    # Terminal zero, matching append_zero() in this module.
    return torch.cat([sigmas, sigmas.new_zeros([1])])
sd_simple_kes_v2/image_generation_data/generation_log_20250706_075947.txt ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ Final sigmas: sigma_min=0.13757067353874633, sigma_max=47.95768510805332
3
+ [Auto Sigma Min] sigma_min set to 0.05328631678672591 using scale factor 900
4
+ Generated sigma sequences. Karras: tensor([47.9577, 40.8963, 34.7614, 29.4468, 24.8569, 20.9051, 17.5141, 14.6142,
5
+ 12.1431, 10.0455, 8.2718, 6.7783, 5.5261, 4.4811, 3.6131, 2.8959,
6
+ 2.3064, 1.8247, 1.4333, 1.1174, 0.8642, 0.6626, 0.5034, 0.3788,
7
+ 0.2820, 0.2075, 0.1509, 0.1083, 0.0766, 0.0533, 0.0000],
8
+ device='cuda:0'), Exponential: tensor([47.9577, 37.9304, 29.9997, 23.7272, 18.7662, 14.8425, 11.7391, 9.2846,
9
+ 7.3434, 5.8080, 4.5936, 3.6332, 2.8735, 2.2727, 1.7975, 1.4217,
10
+ 1.1244, 0.8893, 0.7034, 0.5563, 0.4400, 0.3480, 0.2752, 0.2177,
11
+ 0.1722, 0.1362, 0.1077, 0.0852, 0.0674, 0.0533, 0.0000],
12
+ device='cuda:0')
13
+
14
+ --- Starting Pre-Pass Blending ---
15
+
16
+ [Prepass First Step - Step 0/30] Blended Sigma: 47.957703, Final Sigma: 53.952412
17
+ Prepass First Step Delta Converged: True delta_change: 0.000000, Target Default Settings:0.6
18
+
19
+ --- Early Stopping Evaluation at Step 12 ---
20
+ Current Blended Sigma: 6.541308
21
+ Sigma Variance: 205.154327
22
+ Relative Sigma Progress: 0.991854
23
+ Max Recent Sigma Change: 3.083299
24
+ Mean Recent Sigma Change: 2.090022
25
+
26
+ --- Early Stopping Evaluation at Step 13 ---
27
+ Current Blended Sigma: 5.300452
28
+ Sigma Variance: 205.005798
29
+ Relative Sigma Progress: 0.989947
30
+ Max Recent Sigma Change: 2.492145
31
+ Mean Recent Sigma Change: 1.673866
32
+
33
+ --- Early Stopping Evaluation at Step 14 ---
34
+ Current Blended Sigma: 4.269258
35
+ Sigma Variance: 204.924225
36
+ Relative Sigma Progress: 0.987519
37
+ Max Recent Sigma Change: 2.003127
38
+ Mean Recent Sigma Change: 1.332481
39
+
40
+ --- Early Stopping Evaluation at Step 15 ---
41
+ Current Blended Sigma: 3.417069
42
+ Sigma Variance: 204.903030
43
+ Relative Sigma Progress: 0.984406
44
+ Max Recent Sigma Change: 1.600587
45
+ Mean Recent Sigma Change: 1.053938
46
+
47
+ --- Early Stopping Evaluation at Step 16 ---
48
+ Current Blended Sigma: 2.716985
49
+ Sigma Variance: 204.930466
50
+ Relative Sigma Progress: 0.980388
51
+ Max Recent Sigma Change: 1.270950
52
+ Mean Recent Sigma Change: 0.827970
53
+
54
+ --- Early Stopping Evaluation at Step 17 ---
55
+ Current Blended Sigma: 2.145483
56
+ Sigma Variance: 204.993378
57
+ Relative Sigma Progress: 0.975163
58
+ Max Recent Sigma Change: 1.002521
59
+ Mean Recent Sigma Change: 0.645771
60
+
61
+ --- Early Stopping Evaluation at Step 18 ---
62
+ Current Blended Sigma: 1.682080
63
+ Sigma Variance: 205.079575
64
+ Relative Sigma Progress: 0.968321
65
+ Max Recent Sigma Change: 0.785219
66
+ Mean Recent Sigma Change: 0.499817
67
+
68
+ --- Early Stopping Evaluation at Step 19 ---
69
+ Current Blended Sigma: 1.309016
70
+ Sigma Variance: 205.178589
71
+ Relative Sigma Progress: 0.959293
72
+ Max Recent Sigma Change: 0.610414
73
+ Mean Recent Sigma Change: 0.383711
74
+
75
+ --- Early Stopping Evaluation at Step 20 ---
76
+ Current Blended Sigma: 1.010955
77
+ Sigma Variance: 205.282227
78
+ Relative Sigma Progress: 0.947291
79
+ Max Recent Sigma Change: 0.470745
80
+ Mean Recent Sigma Change: 0.292036
81
+
82
+ --- Early Stopping Evaluation at Step 21 ---
83
+ Current Blended Sigma: 0.774734
84
+ Sigma Variance: 205.384445
85
+ Relative Sigma Progress: 0.931220
86
+ Max Recent Sigma Change: 0.359954
87
+ Mean Recent Sigma Change: 0.220228
88
+
89
+ --- Early Stopping Evaluation at Step 22 ---
90
+ Current Blended Sigma: 0.589107
91
+ Sigma Variance: 205.481079
92
+ Relative Sigma Progress: 0.909547
93
+ Max Recent Sigma Change: 0.272753
94
+ Mean Recent Sigma Change: 0.164463
95
+
96
+ --- Early Stopping Evaluation at Step 23 ---
97
+ Current Blended Sigma: 0.444537
98
+ Sigma Variance: 205.569595
99
+ Relative Sigma Progress: 0.880131
100
+ Max Recent Sigma Change: 0.204688
101
+ Mean Recent Sigma Change: 0.121554
102
+
103
+ --- Early Stopping Evaluation at Step 24 ---
104
+ Current Blended Sigma: 0.332985
105
+ Sigma Variance: 205.648605
106
+ Relative Sigma Progress: 0.839974
107
+ Max Recent Sigma Change: 0.152039
108
+ Mean Recent Sigma Change: 0.088862
109
+
110
+ --- Early Stopping Evaluation at Step 25 ---
111
+ Current Blended Sigma: 0.247734
112
+ Sigma Variance: 205.717728
113
+ Relative Sigma Progress: 0.784905
114
+ Max Recent Sigma Change: 0.111705
115
+ Mean Recent Sigma Change: 0.064218
116
+
117
+ --- Early Stopping Evaluation at Step 26 ---
118
+ Current Blended Sigma: 0.183217
119
+ Sigma Variance: 205.777161
120
+ Relative Sigma Progress: 0.709164
121
+ Max Recent Sigma Change: 0.081129
122
+ Mean Recent Sigma Change: 0.045850
123
+
124
+ --- Early Stopping Evaluation at Step 27 ---
125
+ Current Blended Sigma: 0.134866
126
+ Sigma Variance: 205.827545
127
+ Relative Sigma Progress: 0.604895
128
+ Max Recent Sigma Change: 0.058207
129
+ Mean Recent Sigma Change: 0.032324
130
+ Early stopping triggered by mean at step 26. Mean change: 0.058207. Steps used: 27/31, steps skipped: 4
131
+
132
+ --- Early Stopping Evaluation at Step 28 ---
133
+ Current Blended Sigma: 0.098973
134
+ Sigma Variance: 205.869736
135
+ Relative Sigma Progress: 0.461609
136
+ Max Recent Sigma Change: 0.041230
137
+ Mean Recent Sigma Change: 0.022493
138
+ Early stopping triggered by mean at step 27. Mean change: 0.041230. Steps used: 28/31, steps skipped: 3
139
+
140
+ --- Early Stopping Evaluation at Step 29 ---
141
+ Current Blended Sigma: 0.072565
142
+ Sigma Variance: 205.904678
143
+ Relative Sigma Progress: 0.265671
144
+ Max Recent Sigma Change: 0.028817
145
+ Mean Recent Sigma Change: 0.015444
146
+ Early stopping triggered by mean at step 28. Mean change: 0.028817. Steps used: 29/31, steps skipped: 2
147
+
148
+ --- Early Stopping Evaluation at Step 30 ---
149
+ Current Blended Sigma: 0.053286
150
+ Sigma Variance: 205.933395
151
+ Relative Sigma Progress: 0.875376
152
+ Max Recent Sigma Change: 0.019865
153
+ Mean Recent Sigma Change: 0.010461
154
+ Early stopping triggered by mean at step 29. Mean change: 0.019865. Steps used: 30/31, steps skipped: 1
155
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
156
+ [Randomization] sigma_max: No randomization applied. Using default value: 47.95768510805332
157
+ [Randomization] start_blend: No randomization applied. Using default value: 0.08
158
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5
159
+ [Randomization] sharpness: No randomization applied. Using default value: 0.85
160
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.06
161
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
162
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
163
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
164
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
165
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
166
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
167
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
168
+ [Randomization] rho: No randomization applied. Using default value: 7.959565031107985
169
+ Using device: cuda
170
+ Final sigmas: sigma_min=0.13757067353874633, sigma_max=47.95768510805332
171
+ [Auto Sigma Min] sigma_min set to 0.05328631678672591 using scale factor 900
172
+ Generated sigma sequences. Karras: tensor([47.9577, 40.8963, 34.7614, 29.4468, 24.8569, 20.9051, 17.5141, 14.6142,
173
+ 12.1431, 10.0455, 8.2718, 6.7783, 5.5261, 4.4811, 3.6131, 2.8959,
174
+ 2.3064, 1.8247, 1.4333, 1.1174, 0.8642, 0.6626, 0.5034, 0.3788,
175
+ 0.2820, 0.2075, 0.1509, 0.1083, 0.0766, 0.0533, 0.0000],
176
+ device='cuda:0'), Exponential: tensor([47.9577, 37.9304, 29.9997, 23.7272, 18.7662, 14.8425, 11.7391, 9.2846,
177
+ 7.3434, 5.8080, 4.5936, 3.6332, 2.8735, 2.2727, 1.7975, 1.4217,
178
+ 1.1244, 0.8893, 0.7034, 0.5563, 0.4400, 0.3480, 0.2752, 0.2177,
179
+ 0.1722, 0.1362, 0.1077, 0.0852, 0.0674, 0.0533, 0.0000],
180
+ device='cuda:0')
181
+
182
+ --- Starting Final Pass Blending ---
183
+
184
+
185
+ ==========
186
+ [Start of Sigma Sequence Logging]
187
+ ==========
188
+ [First Step - Step 1/31]
189
+ Step Size: 0.186242
190
+ Dynamic Blend Factor: 0.080000
191
+ Noise Scale: 1.250000
192
+ Smooth Blend: 0.018726
193
+ Blended Sigma: 47.957703
194
+ Final Sigma: 11.164686
195
+
196
+ ==========
197
+ [End of Sigma Sequence Logging]
198
+ ==========
199
+
200
+ ==========
201
+ [Start of Sigma Sequence Logging]
202
+ ==========
203
+ [First Step - Step 0/30]
204
+ Step Size: 0.186242
205
+ Blended Sigma: 47.957703
206
+ Final Sigma: 11.164686
207
+
208
+ ==========
209
+ [End of Sigma Sequence Logging]
210
+ ==========
211
+
212
+ --- Starting Final Pass Blending ---
213
+
214
+
215
+ --- Starting Final Pass Blending ---
216
+
217
+
218
+ --- Starting Final Pass Blending ---
219
+
220
+
221
+ --- Starting Final Pass Blending ---
222
+
223
+
224
+ --- Starting Final Pass Blending ---
225
+
226
+
227
+ --- Starting Final Pass Blending ---
228
+
229
+
230
+ --- Starting Final Pass Blending ---
231
+
232
+
233
+ --- Starting Final Pass Blending ---
234
+
235
+
236
+ --- Starting Final Pass Blending ---
237
+
238
+
239
+ --- Starting Final Pass Blending ---
240
+
241
+
242
+ --- Starting Final Pass Blending ---
243
+
244
+ Sigma Variance: 8.835401
245
+
246
+ --- Starting Final Pass Blending ---
247
+
248
+ Sigma Variance: 8.867280
249
+
250
+ --- Starting Final Pass Blending ---
251
+
252
+ Sigma Variance: 8.940592
253
+
254
+ --- Starting Final Pass Blending ---
255
+
256
+ Sigma Variance: 9.023338
257
+
258
+ --- Starting Final Pass Blending ---
259
+
260
+
261
+ ==========
262
+ [Start of Sigma Sequence Logging]
263
+ ==========
264
+ [Middle Step - Step 16/31]
265
+ Step Size: 0.186242
266
+ Dynamic Blend Factor: 0.290000
267
+ Noise Scale: 0.949560
268
+ Smooth Blend: 0.121376
269
+ Blended Sigma: 2.716985
270
+ Final Sigma: 0.480494
271
+
272
+ ==========
273
+ [End of Sigma Sequence Logging]
274
+ ==========
275
+ Sigma Variance: 9.099936
276
+
277
+ --- Starting Final Pass Blending ---
278
+
279
+ Sigma Variance: 9.164114
280
+
281
+ --- Starting Final Pass Blending ---
282
+
283
+ Sigma Variance: 9.214593
284
+
285
+ --- Starting Final Pass Blending ---
286
+
287
+ Sigma Variance: 9.252495
288
+
289
+ --- Starting Final Pass Blending ---
290
+
291
+ Sigma Variance: 9.279906
292
+
293
+ --- Starting Final Pass Blending ---
294
+
295
+ Sigma Variance: 9.299084
296
+
297
+ --- Starting Final Pass Blending ---
298
+
299
+ Sigma Variance: 9.312086
300
+
301
+ --- Starting Final Pass Blending ---
302
+
303
+ Sigma Variance: 9.320630
304
+
305
+ --- Starting Final Pass Blending ---
306
+
307
+ Sigma Variance: 9.326054
308
+
309
+ --- Starting Final Pass Blending ---
310
+
311
+ Sigma Variance: 9.329365
312
+
313
+ --- Starting Final Pass Blending ---
314
+
315
+ Sigma Variance: 9.331285
316
+
317
+ --- Starting Final Pass Blending ---
318
+
319
+ Sigma Variance: 9.332323
320
+
321
+ --- Starting Final Pass Blending ---
322
+
323
+ Sigma Variance: 9.332817
324
+
325
+ --- Starting Final Pass Blending ---
326
+
327
+ Sigma Variance: 9.332994
328
+
329
+ --- Starting Final Pass Blending ---
330
+
331
+ Sigma Variance: 9.332994
332
+ [Sharpen Mask] Full sharpening applied at steps: [22, 23, 24, 25, 26, 27, 28, 29]
sd_simple_kes_v2/kes_config/default_config.yaml ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_save_directory: "modules/sd_simple_kes/image_generation_data"
2
+ graph_save_directory: "modules/sd_simple_kes/image_generation_data"
3
+ graph_save_enable: false
4
+
5
+ skip_prepass: false # has no change to image quality - not currently functioning as intended for early stop purposes
6
+ device: "cuda" #cpu or cuda
7
+ debug: true
8
+ global_randomize: false
9
+ #
10
+ sigma_scale_factor: 900
11
+ sigma_auto_enabled: true
12
+ sigma_auto_mode: sigma_min # Options: sigma_min, sigma_max
13
+ #
14
+ rho_rand: false
15
+ rho_rand_min: 3.00 # tested recommended settings threshold
16
+ rho_rand_max: 8.00 # tested recommended settings threshold
17
+ #rho: 7.571656624637901
18
+ rho: 7.959565031107985
19
+ rho_enable_randomization_type: false
20
+ rho_randomization_type: "log"
21
+ rho_randomization_percent: 0.1
22
+ #
23
+ sigma_min_rand: false
24
+ sigma_min_rand_min: 0.001 # tested recommended settings
25
+ sigma_min_rand_max: 0.02 # tested recommended settings threshold
26
+ sigma_min: 0.13757067353874633
27
+ sigma_min_enable_randomization_type: false
28
+ sigma_min_randomization_type: "asymmetric"
29
+ sigma_min_randomization_percent: 0.2
30
+ #
31
+ sigma_max_rand: false
32
+ sigma_max_rand_min: 25
33
+ sigma_max_rand_max: 60
34
+ sigma_max: 47.95768510805332
35
+ sigma_max_enable_randomization_type: false
36
+ sigma_max_randomization_type: "log"
37
+ sigma_max_randomization_percent: 0.25
38
+ #
39
+ start_blend_rand: false
40
+ start_blend_rand_min: 0.04 # tested recommended settings threshold
41
+ start_blend_rand_max: 0.11 # tested recommended settings threshold
42
+ start_blend: 0.08
43
+ start_blend_enable_randomization_type: false
44
+ start_blend_randomization_type: "asymmetric"
45
+ start_blend_randomization_percent: 0.1
46
+ #
47
+ end_blend_rand: false
48
+ end_blend_rand_min: 0.4 # tested recommended settings threshold
49
+ end_blend_rand_max: 0.6 # tested recommended settings threshold
50
+ end_blend: 0.5
51
+ end_blend_enable_randomization_type: false
52
+ end_blend_randomization_type: "asymmetric"
53
+ end_blend_randomization_percent: 0.2
54
+ #
55
+ sharpness_rand: false
56
+ sharpness_rand_min: 0.75 # tested recommended settings threshold
57
+ sharpness_rand_max: 0.95 # tested recommended settings threshold
58
+ sharpness: 0.85 # Note: Visible changes in image between 2-15. Above 15 - notable differences. At 50+ - poor image quality. sharpness not applied above 0.95
59
+ sharpen_variance_threshold: 0.01
60
+ sharpen_last_n_steps: 10
61
+ sharpen_mode: "full" # Options: last_n, full, both
62
+ sharpness_enable_randomization_type: false
63
+ sharpness_randomization_type: "asymmetric"
64
+ sharpness_randomization_percent: 0.2
65
+ #
66
+ step_progress_mode: "linear" # Options supported (default = "linear"), "exponential", "logarithmic", or "sigmoid". If exponential, uses "exp_power"
67
+ exp_power: 2
68
+ #
69
+ initial_step_size_rand: false
70
+ initial_step_size_rand_min: 0.7
71
+ initial_step_size_rand_max: 1.0
72
+ initial_step_size: 0.9
73
+ initial_step_size_enable_randomization_type: false
74
+ initial_step_size_randomization_type: "asymmetric"
75
+ initial_step_size_randomization_percent: 0.2
76
+ #
77
+ final_step_size_rand: false
78
+ final_step_size_rand_min: 0.1
79
+ final_step_size_rand_max: 0.3
80
+ final_step_size: 0.20
81
+ final_step_size_enable_randomization_type: false
82
+ final_step_size_randomization_type: "asymmetric"
83
+ final_step_size_randomization_percent: 0.2
84
+ #
85
+ step_size_factor_rand: false
86
+ step_size_factor_rand_min: 0.65
87
+ step_size_factor_rand_max: 0.85
88
+ step_size_factor: 0.80814932869181
89
+ step_size_factor_enable_randomization_type: false
90
+ step_size_factor_randomization_type: "asymmetric"
91
+ step_size_factor_randomization_percent: 0.2
92
+ #
93
+ initial_noise_scale_rand: false
94
+ initial_noise_scale_rand_min: 1.0
95
+ initial_noise_scale_rand_max: 1.5
96
+ initial_noise_scale: 1.25
97
+ initial_noise_scale_enable_randomization_type: false
98
+ initial_noise_scale_randomization_type: "asymmetric"
99
+ initial_noise_scale_randomization_percent: 0.2
100
+ #
101
+ final_noise_scale_rand: false
102
+ final_noise_scale_rand_min: 0.6
103
+ final_noise_scale_rand_max: 1.0
104
+ final_noise_scale: 0.80
105
+ final_noise_scale_enable_randomization_type: false
106
+ final_noise_scale_randomization_type: "asymmetric"
107
+ final_noise_scale_randomization_percent: 0.2
108
+ #
109
+ smooth_blend_factor_rand: false
110
+ smooth_blend_factor_rand_min: 6
111
+ smooth_blend_factor_rand_max: 11
112
+ smooth_blend_factor: 9.426004103284665
113
+ smooth_blend_factor_enable_randomization_type: false
114
+ smooth_blend_factor_randomization_type: "asymmetric"
115
+ smooth_blend_factor_randomization_percent: 0.2
116
+ #
117
+ noise_scale_factor_rand: false
118
+ noise_scale_factor_rand_min: 0.75
119
+ noise_scale_factor_rand_max: 0.95
120
+ noise_scale_factor: 0.8113992828873163
121
+ noise_scale_factor_enable_randomization_type: false
122
+ noise_scale_factor_randomization_type: "asymmetric"
123
+ noise_scale_factor_randomization_percent: 0.2
124
+
125
+ # Experimental settings
126
+ early_stopping_threshold_rand: false
127
+ early_stopping_threshold_rand_min: 0.001
128
+ early_stopping_threshold_rand_max: 0.02
129
+ early_stopping_threshold: 0.06
130
+ early_stopping_method: max # Options: mean, max, sum
131
+ sigma_variance_scale: 0.1 # *100 = % of current sigma, increase to reduce false early stopping, try 0.07 or 0.10
132
+ safety_minimum_stop_step: 10 # means won't consider until past this step, consider increasing this to increase minimum steps to process the image
133
+ recent_change_convergence_delta: 0.6 # this is the change between mean/max variable changes between sigmas. Keep this relatively low. This contributes directly to when we stop.
134
+ #min_visual_sigma: 50 # Increase from 10 to push later into the denoising sequence
135
+ early_stopping_threshold_enable_randomization_type: false
136
+ early_stopping_threshold_randomization_type: "asymmetric"
137
+ early_stopping_threshold_randomization_percent: 0.2
sd_simple_kes_v2/plot_sigma_sequence.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import matplotlib.pyplot as plt
2
+ import numpy as np
3
+ import os
4
+
5
def plot_sigma_sequence(sigs, stopping_index, log_filename, save_directory="modules/sd_simple_kes_v2/image_generation_data", show_plot=False):
    """
    Plot the sigma sequence and mark the early stopping point.

    Parameters:
    - sigs: The sigma tensor or numpy array (can be truncated if stopping early).
    - stopping_index: The step index where early stopping was triggered.
    - log_filename: The filename of the generation log (used to match the graph name).
    - save_directory: The folder where the plot should be saved.
    - show_plot: Set to True to display the plot interactively.

    Returns:
    - The path the plot was written to.
    """

    # Extract base name to match log filename
    base_filename = os.path.splitext(os.path.basename(log_filename))[0]
    graph_filename = f"{base_filename}_sigma_plot.png"
    graph_path = os.path.join(save_directory, graph_filename)

    # Ensure the target directory exists so plt.savefig() does not raise
    # FileNotFoundError on a fresh install.
    os.makedirs(save_directory, exist_ok=True)

    # Prepare sigma sequence for plotting (torch tensor -> numpy when needed)
    sigs_np = sigs.cpu().numpy() if hasattr(sigs, 'cpu') else np.array(sigs)
    x = np.arange(len(sigs_np))

    # Plotting
    plt.figure(figsize=(10, 6))
    plt.plot(x, sigs_np, label='Sigma Sequence', marker='o')
    plt.axvline(x=stopping_index, color='red', linestyle='--', label=f'Stopping Point: {stopping_index}')
    plt.xlabel('Step Index')
    plt.ylabel('Sigma Value')
    plt.title('Sigma Sequence with Early Stopping Point')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(graph_path)

    if show_plot:
        plt.show()

    plt.close()
    return graph_path
sd_simple_kes_v2/setup.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ Remove the appended name "_v2" from all file and folder names, then place the folder into the "modules" directory of A1111/Forge.
2
+
3
+ This version is experimental.
sd_simple_kes_v2/simple_kes_v2.py ADDED
@@ -0,0 +1,785 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import logging
3
+ from modules.sd_simple_kes_v2.get_sigmas import get_sigmas_karras, get_sigmas_exponential
4
+ from modules.sd_simple_kes_v2.validate_config import validate_config
5
+ from modules.sd_simple_kes_v2.plot_sigma_sequence import plot_sigma_sequence
6
+ import os
7
+ import yaml
8
+ import random
9
+ from datetime import datetime
10
+ import warnings
11
+ import math
12
+ from typing import Optional
13
+ import json
14
+ import numpy as np
15
+
16
+
17
+
18
def simple_kes_scheduler_v22(n: int, sigma_min: float, sigma_max: float, device: torch.device) -> torch.Tensor:
    """Convenience entry point: build a SimpleKEScheduler and return its sigma schedule."""
    return SimpleKEScheduler(n=n, sigma_min=sigma_min, sigma_max=sigma_max, device=device)()
21
+
22
+
23
+
24
class SharedLogger:
    """Buffered debug logger shared between the scheduler and its helpers.

    Messages are accumulated in memory (only while ``debug`` is on) and later
    written out in one batch by the scheduler.
    """

    def __init__(self, debug=False):
        self.debug = debug      # when False, log() is a no-op
        self.log_buffer = []    # retained messages, in arrival order

    def log(self, message):
        if not self.debug:
            return
        self.log_buffer.append(message)

    def dump(self):
        """Return all buffered messages joined by newlines."""
        return "\n".join(self.log_buffer)
35
+
36
+
37
+ class SimpleKEScheduler:
38
+ """
39
+ SimpleKEScheduler
40
+ ------------------
41
+ A hybrid scheduler that combines Karras-style sigma sampling
42
+ with exponential decay and blending controls. Supports parameterized
43
+ customization for use in advanced diffusion pipelines.
44
+
45
+ Parameters:
46
+ - steps (int): Number of inference steps.
47
+ - device (torch.device): Target device (e.g. 'cuda').
48
+ - config (dict): Scheduler-specific configuration options.
49
+
50
+ Usage:
51
+ scheduler = SimpleKEScheduler(steps=30, device='cuda', config=config_dict)
52
+ sigmas = scheduler.get_sigmas()
53
+ """
54
+
55
+
56
    def __init__(self, n: int, sigma_min: Optional[float] = None, sigma_max: Optional[float] = None, device: torch.device = "cpu", logger=None, **kwargs) -> None:
        """Load the YAML config, apply kwargs overrides, and seed all scheduler state."""
        self.steps = n if n is not None else 10
        self.original_steps = n
        # NOTE(review): both branches of this conditional pass the same value to
        # torch.device(); the isinstance check is currently redundant.
        self.device = torch.device(device if isinstance(device, str) else device)
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max

        # Canonical spellings accepted for the *_randomization_type settings.
        self.RANDOMIZATION_TYPE_ALIASES = {
            'symmetric': 'symmetric', 'sym': 'symmetric', 's': 'symmetric',
            'asymmetric': 'asymmetric', 'assym': 'asymmetric', 'a': 'asymmetric',
            'logarithmic': 'logarithmic', 'log': 'logarithmic', 'l': 'logarithmic',
            'exponential': 'exponential', 'exp': 'exponential', 'e': 'exponential'
        }

        # Temporarily hold overrides from kwargs
        self._overrides = kwargs.copy()
        # NOTE(review): the path points at "sd_simple_kes" (without "_v2");
        # setup.txt says the folder is renamed on install — confirm intentional.
        self.config_path = os.path.abspath(os.path.normpath(os.path.join("modules", "sd_simple_kes", "kes_config", "default_config.yaml")))
        self.config_data = self.load_config()
        self.config = self.config_data.copy()
        self.settings = self.config.copy()

        # Apply overrides from kwargs if present (only keys already in config)
        for k, v in self._overrides.items():
            if k in self.settings:
                self.settings[k] = v
                setattr(self, k, v)

        self.debug = self.settings.get("debug", False)
        # NOTE(review): the `logger` constructor argument is ignored — a fresh
        # SharedLogger is always created here.
        logger = SharedLogger(debug=self.debug)
        self.logger=logger
        self.log = self.logger.log
        validate_config(self.config, logger=self.logger)

        # Mirror every setting as an instance attribute for convenient access.
        for key, value in self.settings.items():
            setattr(self, key, value)

        if self.settings.get("global_randomize", False):
            self.apply_global_randomization()
        self.settings = self.settings.copy()

        # Settings that may be re-randomized per generation; all must be present.
        self.re_randomizable_keys = [
            "sigma_min", "sigma_max", "start_blend", "end_blend", "sharpness",
            "early_stopping_threshold",
            "initial_step_size", "final_step_size",
            "initial_noise_scale", "final_noise_scale",
            "smooth_blend_factor", "step_size_factor", "noise_scale_factor", "rho"
        ]

        for key in self.re_randomizable_keys:
            value = self.settings.get(key)
            if value is None:
                raise KeyError(f"[KEScheduler] Missing required setting: {key}")
            setattr(self, key, value)

        self.sigma_variance_threshold = self.settings.get('sharpen_variance_threshold', 0.01)
        self.N = self.settings.get('sharpen_last_n_steps', 10)

        # Pick the timestamped log filename up front so all stages share it.
        self.initialize_generation_filename()
        # Convergence flags consumed by the early-stopping logic.
        self.relative_converged = False
        self.max_converged = False
        self.delta_converged = False
        self.early_stop_triggered = False
119
+
120
    def __call__(self):
        """Build, validate, and return the final 1-D sigma schedule tensor.

        Raises ValueError if the computed sigmas contain NaN/Inf or fail the
        range sanity checks below.
        """
        # First pass: Run prepass to determine predicted_stop_step
        if not self.settings.get('skip_prepass', False):
            self.prepass_compute_sigmas()

        else:
            # Build sigma sequence directly (without prepass)
            self.config_values()
            self.generate_sigmas_schedule()
            self.blend_sigma_sequence(
                sigs=self.sigs,
                sigmas_karras=self.sigmas_karras,
                sigmas_exponential=self.sigmas_exponential,
                pre_pass = False
            )
        sigmas = self.compute_sigmas()
        # Safety checks
        if torch.isnan(sigmas).any():
            raise ValueError("[SimpleKEScheduler] NaN detected in sigmas")
        if torch.isinf(sigmas).any():
            raise ValueError("[SimpleKEScheduler] Inf detected in sigmas")
        # NOTE(review): the two checks below use .all(), so they fire only when
        # *every* sigma is non-positive / above 1000 — confirm .any() was not
        # intended for the magnitude check.
        if (sigmas <= 0).all():
            raise ValueError("[SimpleKEScheduler] All sigma values are <= 0")
        if (sigmas > 1000).all():
            raise ValueError("[SimpleKEScheduler] Sigma values are extremely large — might explode the model")
        # Save logs to file
        if self.debug:
            self.save_generation_settings()
        # Return final sigmas to the scheduler caller
        return sigmas
150
+
151
    def initialize_generation_filename(self, folder=None, base_name="generation_log", ext="txt"):
        """
        Initialize the log filename early so it can be used throughout the process.

        Parameters:
        - folder: optional explicit directory; falls back to the configured
          'log_save_directory' setting when None.
        - base_name: filename stem (a timestamp is appended).
        - ext: file extension without the dot.

        Side effect: creates the directory if needed and stores the full path
        in self.log_filename.
        """
        if folder is None:
            folder = self.settings.get('log_save_directory', 'modules/sd_simple_kes/image_generation_data')
            folder = os.path.abspath(os.path.normpath(folder))

        os.makedirs(folder, exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        self.log_filename = os.path.join(folder, f"{base_name}_{timestamp}.{ext}")
163
+
164
    def save_generation_settings(self):
        """
        Write the buffered debug log to self.log_filename, then clear the buffer.

        The target path was chosen earlier by initialize_generation_filename();
        this method takes no parameters. The confirmation message appended via
        self.log() after writing lands in the (then-cleared) buffer, not the file.
        """
        with open(self.log_filename, "w", encoding = 'utf-8') as f:
            for line in self.logger.log_buffer:
                f.write(f"{line}\n")
        self.log(f"[SimpleKEScheduler] Generation settings saved to {self.log_filename}")

        self.logger.log_buffer.clear()
179
+
180
+ def save_image_plot(self):
181
+ if self.settings.get('graph_save_enable', False):
182
+ graph_plot = plot_sigma_sequence(
183
+ sigs[:i + 1],
184
+ i,
185
+ self.log_filename,
186
+ self.settings.get('graph_save_directory', 'modules/sd_simple_kes/image_generation_data'),
187
+ self.settings.get('graph_save_enable', False)
188
+ )
189
+ self.log(f"Sigma sequence plot saved to {graph_plot}")
190
+
191
    def load_config(self):
        """Read the YAML config at self.config_path; return {} on any failure.

        Failures (missing file, malformed YAML) are logged via self.log rather
        than raised, so the scheduler can continue with defaults.
        """
        try:
            with open(self.config_path, 'r', encoding = 'utf-8') as f:
                user_config = yaml.safe_load(f)
                return user_config
        except FileNotFoundError:
            self.log(f"Config file not found: {self.config_path}. Using empty config.")
            return {}
        except yaml.YAMLError as e:
            self.log(f"Error loading config file: {e}")
            return {}
202
+
203
    def apply_global_randomization(self):
        """Force randomization for all eligible settings by enabling _rand flags and re-randomizing values."""
        # First pass: turn on every *_rand flag that has a corresponding
        # *_rand_min / *_rand_max bound present in the settings.
        for key in list(self.settings.keys()):
            if key.endswith("_rand_min") or key.endswith("_rand_max"):
                base_key = key.rsplit("_rand_", 1)[0]
                rand_flag_key = f"{base_key}_rand"
                self.settings[rand_flag_key] = True
                # Step 2: If global_randomize is active, re-randomize all eligible keys
                # NOTE(review): this re-randomizes the bound key itself
                # (*_rand_min / *_rand_max), not the base setting ``base_key`` —
                # confirm that perturbing the bounds, rather than the base
                # value, is the intended behavior.
                if self.settings.get("global_randomize", False):
                    # ``key`` was taken from settings.keys() above, so this
                    # guard can only fire if a nested call removed it meanwhile.
                    if key not in self.settings:
                        raise KeyError(f"[apply_global_randomization] Missing required key: {key}")

                    default_val = self.settings[key]
                    randomized_val = self.get_random_or_default(key, default_val)
                    self.settings[key] = randomized_val
                    # Mirror the value onto the instance for attribute access.
                    setattr(self, key, randomized_val)
220
+
221
+ def get_randomization_type(self, key_prefix):
222
+ """
223
+ Retrieves the randomization type for a given key, with fallback to 'asymmetric' if missing.
224
+ """
225
+ randomization_type_raw = self.settings.get(f'{key_prefix}_randomization_type', 'asymmetric')
226
+ randomization_type = self.RANDOMIZATION_TYPE_ALIASES.get(randomization_type_raw.lower(), 'asymmetric')
227
+ return randomization_type
228
+
229
+ def get_randomization_percent(self, key_prefix):
230
+ """
231
+ Retrieves the randomization percent for a given key, with fallback to 0.2 if missing.
232
+ """
233
+ return self.settings.get(f'{key_prefix}_randomization_percent', 0.2)
234
+
235
+
236
+ def get_random_between_min_max(self, key_prefix, default_value):
237
+ """
238
+ Picks a random value between _rand_min and _rand_max if _rand is True.
239
+ Otherwise, returns the base value.
240
+ """
241
+ randomize_flag = self.settings.get(f'{key_prefix}_rand', False)
242
+
243
+ if randomize_flag:
244
+ rand_min = self.settings.get(f'{key_prefix}_rand_min', default_value)
245
+ rand_max = self.settings.get(f'{key_prefix}_rand_max', default_value)
246
+
247
+ if rand_min == rand_max:
248
+ self.log(f"[Random Range] {key_prefix}: min and max are equal ({rand_min}). Using single value.")
249
+ return rand_min
250
+
251
+ value = random.uniform(rand_min, rand_max)
252
+ self.log(f"[Random Range] {key_prefix}: Picked random value {value} between {rand_min} and {rand_max}")
253
+ return value
254
+ else:
255
+ self.log(f"[Random Range] {key_prefix}: Randomization is OFF. Using base value {default_value}")
256
+ return default_value
257
+
258
    def get_random_by_type(self, key_prefix, default_value):
        """
        Randomize *default_value* according to the configured randomization
        type for *key_prefix* ('symmetric', 'asymmetric', 'logarithmic', or
        'exponential').

        Returns the default unchanged when
        ``<key_prefix>_enable_randomization_type`` is off, or when the
        resolved type is not one of the four known strategies.
        """
        randomization_enabled = self.settings.get(f'{key_prefix}_enable_randomization_type', False)

        if not randomization_enabled:
            self.log(f"[Randomization Type] {key_prefix}: Randomization type is OFF. Using base value {default_value}")
            return default_value

        randomization_type = self.get_randomization_type(key_prefix)
        randomization_percent = self.get_randomization_percent(key_prefix)

        if randomization_type == 'symmetric':
            # Equal spread on both sides of the default.
            rand_min = default_value * (1 - randomization_percent)
            rand_max = default_value * (1 + randomization_percent)
            self.log(f"[Symmetric Randomization] {key_prefix}: Range {rand_min} to {rand_max}")

        elif randomization_type == 'asymmetric':
            # Upward spread is twice the downward spread.
            rand_min = default_value * (1 - randomization_percent)
            rand_max = default_value * (1 + (randomization_percent * 2))
            self.log(f"[Asymmetric Randomization] {key_prefix}: Range {rand_min} to {rand_max}")

        elif randomization_type == 'logarithmic':
            # NOTE(review): math.log raises ValueError when
            # default_value * (1 - percent) <= 0 (e.g. default <= 0 or
            # percent >= 1) — confirm inputs are constrained upstream.
            rand_min = math.log(default_value * (1 - randomization_percent))
            rand_max = math.log(default_value * (1 + randomization_percent))
            value = math.exp(random.uniform(rand_min, rand_max))
            self.log(f"[Logarithmic Randomization] {key_prefix}: Log-space randomization resulted in {value}")
            return value

        elif randomization_type == 'exponential':
            # NOTE(review): exponentiates the raw sampled value, so the result
            # is e**x of the sampled range — magnitudes grow quickly for
            # defaults much above 1; confirm this scale is intended.
            rand_min = default_value * (1 - randomization_percent)
            rand_max = default_value * (1 + randomization_percent)
            base_value = random.uniform(rand_min, rand_max)
            value = math.exp(base_value)
            self.log(f"[Exponential Randomization] {key_prefix}: Randomized exponential value {value}")
            return value

        else:
            self.log(f"[Randomization Type] {key_prefix}: Invalid randomization type {randomization_type}. Using base value.")
            return default_value

        # Reached only by the symmetric / asymmetric branches, which fall
        # through to a plain uniform draw over the computed range.
        value = random.uniform(rand_min, rand_max)

        self.log(f"[Randomization Type] {key_prefix}: Randomized value {value}")
        return value
301
+
302
+ def get_random_or_default(self, key_prefix, default_value):
303
+ """
304
+ Selects randomization method based on active flags:
305
+ - If both enabled → prioritize randomization type (or min/max if you prefer).
306
+ - If only one enabled → apply that one.
307
+ - If neither → return default value.
308
+ """
309
+ rand_type_enabled = self.settings.get(f'{key_prefix}_enable_randomization_type', False)
310
+ min_max_enabled = self.settings.get(f'{key_prefix}_rand', False)
311
+
312
+ if rand_type_enabled and min_max_enabled:
313
+ self.log(f"[Randomization Policy] Both min/max and randomization type enabled for {key_prefix}. System will prioritize randomization type.")
314
+ result_value = self.get_random_by_type(key_prefix, default_value)
315
+
316
+ elif rand_type_enabled:
317
+ result_value = self.get_random_by_type(key_prefix, default_value)
318
+ self.log(f"[Randomization] {key_prefix}: Applied randomization type. Final value: {result_value}")
319
+
320
+ elif min_max_enabled:
321
+ result_value = self.get_random_between_min_max(key_prefix, default_value)
322
+ self.log(f"[Randomization] {key_prefix}: Applied min/max randomization. Final value: {result_value}")
323
+
324
+ else:
325
+ result_value = default_value
326
+ self.log(f"[Randomization] {key_prefix}: No randomization applied. Using default value: {result_value}")
327
+
328
+ return result_value
329
+
330
+ def blend_sigma_sequence(self, sigs, sigmas_karras, sigmas_exponential, pre_pass=False):
331
+ self.progress = torch.linspace(0, 1, len(sigmas_karras)).to(self.device)
332
+ self.blended_sigmas = []
333
+ self.change_log = []
334
+ self.relative_converged = False
335
+ self.max_converged = False
336
+ self.delta_converged = False
337
+ self.early_stop_triggered = False
338
+
339
+ """
340
+ Computes the blended sigma sequence using adaptive step sizes, dynamic blend factors,
341
+ and noise scaling across the progress of the diffusion process.
342
+
343
+ This method blends sigma values from the Karras and Exponential schedules using
344
+ a smooth, progress-dependent interpolation. It applies adaptive scaling based on
345
+ step size and noise scale factors to each sigma in the sequence.
346
+
347
+ Parameters:
348
+ -----------
349
+ sigs : torch.Tensor
350
+ A pre-allocated tensor where the computed sigma sequence will be stored.
351
+ This tensor must match the shape of the sigma schedules.
352
+
353
+ sigmas_karras : torch.Tensor
354
+ The sigma sequence generated using the Karras schedule.
355
+
356
+ sigmas_exponential : torch.Tensor
357
+ The sigma sequence generated using the Exponential schedule.
358
+
359
+ Returns:
360
+ --------
361
+ sigs : torch.Tensor
362
+ The final blended and scaled sigma sequence.
363
+
364
+ Notes:
365
+ ------
366
+ - This method is used in both the prepass and final pass of the scheduler.
367
+ - The progress tensor is computed linearly from 0 to 1 over the length of the sequence.
368
+ - The method uses class attributes for step size factors, blend factors, and noise scaling.
369
+ - This method modifies `sigs` in place.
370
+ """
371
+ for i in range(len(sigs)):
372
+ if self.step_progress_mode == "linear":
373
+ progress_value = self.progress[i]
374
+ elif self.step_progress_mode == "exponential":
375
+ progress_value = self.progress[i] ** self.settings.get("exp_power", 2)
376
+ elif self.step_progress_mode == "logarithmic":
377
+ progress_value = torch.log1p(self.progress[i] * (torch.exp(torch.tensor(1.0)) - 1))
378
+ elif self.step_progress_mode == "sigmoid":
379
+ progress_value = 1 / (1 + torch.exp(-12 * (self.progress[i] - 0.5)))
380
+ else:
381
+ progress_value = self.progress[i] # Fallback to linear (previous version used)
382
+ dynamic_blend_factor = self.start_blend * (1 - self.progress[i]) + self.end_blend * self.progress[i]
383
+ smooth_blend = torch.sigmoid((dynamic_blend_factor - 0.5) * self.smooth_blend_factor)
384
+ noise_scale = self.initial_noise_scale * (1 - self.progress[i]) + self.final_noise_scale * self.progress[i] * self.noise_scale_factor
385
+ self.blended_sigma = sigmas_karras[i] * (1 - smooth_blend) + sigmas_exponential[i] * smooth_blend
386
+
387
+
388
+ self.step_size = self.initial_step_size * (1 - progress_value) + self.final_step_size * progress_value * self.step_size_factor
389
+ sigs[i] = self.blended_sigma * self.step_size * noise_scale
390
+
391
+ self.change = torch.abs(sigs[i] - sigs[i - 1])
392
+ self.change_log.append(self.change.item())
393
+ relative_sigma_progress = (self.blended_sigma - sigs[-1].item()) / self.blended_sigma
394
+ recent_changes = torch.abs(torch.tensor(self.change_log[-5:]))
395
+ max_change = torch.max(recent_changes).item()
396
+ mean_change = torch.mean(recent_changes).item()
397
+ #percent_of_threshold = (max_change / self.early_stopping_threshold) * 100
398
+ self.delta_change = abs(max_change - mean_change)
399
+ self.blended_sigmas.append(self.blended_sigma.item())
400
+
401
+ # Check 1: Relative sigma progress
402
+ self.relative_converged = relative_sigma_progress < 0.05
403
+ # Check 2: Max recent sigma change
404
+ self.max_converged = max_change < self.early_stopping_threshold
405
+ # Check 3: Max-mean difference converged
406
+ self.delta_converged = self.delta_change < self.settings.get('recent_change_convergence_delta', 0.02)
407
+
408
+ if pre_pass:
409
+ if i >= 2:
410
+ sigma_rate = abs(self.blended_sigmas[i] - self.blended_sigmas[i - 1])
411
+ previous_sigma_rate = abs(self.blended_sigmas[i - 1] - self.blended_sigmas[i - 2])
412
+ if sigma_rate > previous_sigma_rate:
413
+ self.log(f"Sigma decline is slowing down → possible plateau at step {i+1}.")
414
+
415
+ if i == 0:
416
+ self.log("\n--- Starting Pre-Pass Blending ---\n")
417
+ step_label = "Prepass First Step"
418
+ elif i == len(sigmas_karras) - 1:
419
+ step_label = "Prepass Last Step"
420
+ else:
421
+ step_label = None
422
+
423
+ if step_label:
424
+ self.log(f"[{step_label} - Step {i}/{len(sigs)}] Blended Sigma: {self.blended_sigma:.6f}, Final Sigma: {sigs[i]:.6f}")
425
+ self.log(f"{step_label} Delta Converged: {self.delta_converged} delta_change: {self.delta_change:.6f}, Target Default Settings:{self.recent_change_convergence_delta}")
426
+
427
+ # Start checking for early stopping after minimum steps
428
+ if i > self.safety_minimum_stop_step and len(self.change_log) > 10:
429
+ # Calculate variance and dynamic threshold
430
+ self.blended_tensor = torch.tensor(self.blended_sigmas)
431
+ if self.device == 'cpu':
432
+ self.sigma_variance = np.var(self.blended_sigmas)
433
+ else:
434
+ self.sigma_variance = torch.var(sigs).item()
435
+
436
+ self.min_sigma_threshold = self.sigma_variance * self.settings.get('sigma_variance_scale', 0.05) # scale factor can be tuned
437
+ self.log(f"\n--- Early Stopping Evaluation at Step {i+1} ---")
438
+ self.log(f"Current Blended Sigma: {self.blended_sigma:.6f}")
439
+ self.log(f"Sigma Variance: {self.sigma_variance:.6f}")
440
+ self.log(f"Relative Sigma Progress: {relative_sigma_progress:.6f}")
441
+ self.log(f"Max Recent Sigma Change: {max_change:.6f}")
442
+ self.log(f"Mean Recent Sigma Change: {mean_change:.6f}")
443
+
444
+
445
+ # Reason for continuing (sigma still too high)
446
+ if self.blended_sigma > self.min_sigma_threshold:
447
+ self.log(f"Blended Sigma {self.blended_sigma:.6f} exceeds min sigma threshold {self.min_sigma_threshold:.6f} → Continuing.\n")
448
+
449
+ # Start Early Stopping Checks
450
+ if self.early_stopping_method == "mean":
451
+ mean_change = sum(self.change_log) / len(self.change_log)
452
+ if mean_change < self.early_stopping_threshold:
453
+ skipped_steps = len(sigmas_karras) - (i)
454
+ self.log(f"Early stopping triggered by mean at step {i}. Mean change: {mean_change:.6f}. Steps used: {i}/{len(sigmas_karras)}, steps skipped: {skipped_steps}")
455
+ self.save_image_plot()
456
+
457
+ elif self.early_stopping_method == "max":
458
+ #max_change = max(self.change_log)
459
+ if max_change < self.early_stopping_threshold:
460
+ skipped_steps = len(sigmas_karras) - (i)
461
+ self.log(f"Early stopping triggered by mean at step {i}. Mean change: {max_change:.6f}. Steps used: {i}/{len(sigmas_karras)}, steps skipped: {skipped_steps}")
462
+ self.save_image_plot()
463
+
464
+ elif self.early_stopping_method == "sum":
465
+ stable_steps = sum(
466
+ 1 for j in range(1, len(self.change_log))
467
+ if abs(self.change_log[j]) < self.early_stopping_threshold * abs(sigs[j])
468
+ )
469
+ if stable_steps >= 0.8 * len(self.change_log):
470
+ skipped_steps = len(sigmas_karras) - (i)
471
+ self.log(f"Early stopping triggered by sum at step {i}. Stable steps: {stable_steps}/{len(self.change_log)}. Steps used: {i}/{len(sigmas_karras)}, steps skipped: {skipped_steps}")
472
+ self.save_image_plot()
473
+
474
+ if self.relative_converged and self.max_converged and self.delta_converged:
475
+ self.early_stop_triggered = True
476
+ self.log(f"\n--- Early Stopping Evaluation at Step {i+1} ---")
477
+ self.log(f"Relative Sigma Progress: {relative_sigma_progress:.6f}")
478
+ self.log(f"Max Recent Sigma Change: {max_change:.6f}")
479
+ self.log(f"Mean Recent Sigma Change: {mean_change:.6f}")
480
+ self.log(f"Delta Change: {delta_change:.6f} (Target: {self.settings.get('recent_change_convergence_delta', 0.02)})")
481
+ self.log(f"Early stopping criteria met at step {i+1} based on all convergence checks.")
482
+ self.predicted_stop_step = i
483
+ #self.steps = self.predicted_stop_step
484
+ break
485
+
486
+ # === Final Pass ===
487
+
488
+ #for i in range(len(sigmas_karras)):
489
+ for i in range(len(sigs)):
490
+ if not pre_pass:
491
+ self.log("\n--- Starting Final Pass Blending ---\n")
492
+
493
+ if i == 0:
494
+ step_label = "First Step"
495
+ elif i == len(sigmas_karras) // 2:
496
+ step_label = "Middle Step"
497
+ elif i == len(sigmas_karras) - 1:
498
+ step_label = "Last Step"
499
+ else:
500
+ step_label = None
501
+
502
+ dynamic_blend_factor = self.start_blend * (1 - self.progress[i]) + self.end_blend * self.progress[i]
503
+ smooth_blend = torch.sigmoid((dynamic_blend_factor - 0.5) * self.smooth_blend_factor)
504
+ noise_scale = self.initial_noise_scale * (1 - self.progress[i]) + self.final_noise_scale * self.progress[i] * self.noise_scale_factor
505
+ blended_sigma = sigmas_karras[i] * (1 - smooth_blend) + sigmas_exponential[i] * smooth_blend
506
+
507
+
508
+ sigs[i] = blended_sigma * self.step_size * noise_scale
509
+ self.blended_sigmas.append(blended_sigma.item())
510
+
511
+ if step_label:
512
+ if not pre_pass: # Only log detailed steps in the final pass
513
+ self.log("\n" + "=" * 10 + "\n[Start of Sigma Sequence Logging]\n" + "=" * 10)
514
+ self.log(f"[{step_label} - Step {i}/{len(sigmas_karras)}]"
515
+ f"\nStep Size: {self.step_size:.6f}"
516
+ f"\nDynamic Blend Factor: {dynamic_blend_factor:.6f}"
517
+ f"\nNoise Scale: {noise_scale:.6f}"
518
+ f"\nSmooth Blend: {smooth_blend:.6f}"
519
+ f"\nBlended Sigma: {blended_sigma:.6f}"
520
+ f"\nFinal Sigma: {sigs[i]:.6f}")
521
+ self.log("\n" + "=" * 10 + "\n[End of Sigma Sequence Logging]\n" + "=" * 10)
522
+
523
+ elif pre_pass: # Optional: Log a simple summary in the prepass
524
+ self.log(f"[Prepass {step_label} - Step {i}/{len(sigmas_karras)}] "
525
+ f"Blended Sigma: {blended_sigma:.6f}, Final Sigma: {sigs[i]:.6f}")
526
+
527
+ if i == 0:
528
+ step_label = "First Step"
529
+ elif i == len(sigmas_karras) - 1:
530
+ step_label = "Last Step"
531
+ else:
532
+ step_label = None
533
+
534
+ if step_label:
535
+ self.log("\n" + "=" * 10 + "\n[Start of Sigma Sequence Logging]\n" + "=" * 10)
536
+ self.log(f"[{step_label} - Step {i}/{len(sigs)}]"
537
+ f"\nStep Size: {self.step_size:.6f}"
538
+ f"\nBlended Sigma: {blended_sigma:.6f}"
539
+ f"\nFinal Sigma: {sigs[i]:.6f}")
540
+ self.log("\n" + "=" * 10 + "\n[End of Sigma Sequence Logging]\n" + "=" * 10)
541
+
542
+ if i > 0:
543
+ self.change = torch.abs(sigs[i] - sigs[i - 1])
544
+ self.change_log.append(self.change.item())
545
+
546
+ # Early Stopping Evaluation
547
+ if i > self.safety_minimum_stop_step and len(self.change_log) > 5:
548
+ #relative_sigma_progress = (blended_sigma - self.sigs[-1].item()) / blended_sigma
549
+ final_target_sigma = sigmas_karras[-1].item() # or use min(self.sigmas) if preferred
550
+ #relative_sigma_progress = (blended_sigma - final_target_sigma) / blended_sigma
551
+ if blended_sigma != 0:
552
+ relative_sigma_progress = (blended_sigma - final_target_sigma) / blended_sigma
553
+ else:
554
+ relative_sigma_progress = 0 # Assume fully converged if blended_sigma is 0
555
+ # Optional: Show variance but no need to stop on it
556
+ self.sigma_variance = torch.var(sigs).item() if self.device != 'cpu' else np.var(self.blended_sigmas)
557
+ self.log(f"Sigma Variance: {self.sigma_variance:.6f}")
558
+
559
+ if pre_pass and self.early_stop_triggered:
560
+ return sigs[:self.predicted_stop_step] # Return only the usable sequence
561
+ else:
562
+ return sigs
563
+
564
+ def generate_sigmas_schedule(self):
565
+ """
566
+ Generates the sigma schedules required for the hybrid blending process.
567
+
568
+ The Karras and Exponential sigma sequences are created to provide two distinct
569
+ noise scaling strategies:
570
+ - The Karras sequence offers a more aggressive noise decay, commonly used in
571
+ modern schedulers for improved image quality and denoising stability.
572
+ - The Exponential sequence provides a traditional log-space noise schedule.
573
+
574
+ These two sequences are dynamically blended in later steps using progress-dependent
575
+ weights to produce a custom sigma path that combines the advantages of both approaches.
576
+
577
+ This blending process is critical to the scheduler's ability to:
578
+ - Adapt noise scaling across steps.
579
+ - Control the sharpness and smoothness of transitions.
580
+ - Support early stopping based on sigma convergence patterns.
581
+
582
+ These sigma sequences must be regenerated in both the prepass (for early stopping detection)
583
+ and the final pass (for polished sigma application), ensuring both passes are synchronized
584
+ with the current step count and randomization settings.
585
+ """
586
+
587
+
588
+ self.sigmas_karras = get_sigmas_karras(n=self.steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max, rho=self.rho, device=self.device)
589
+ self.sigmas_exponential = get_sigmas_exponential(n=self.steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max, device=self.device)
590
+ target_length = min(len(self.sigmas_karras), len(self.sigmas_exponential))
591
+ self.sigmas_karras = self.sigmas_karras[:target_length]
592
+ self.sigmas_exponential = self.sigmas_exponential[:target_length]
593
+
594
+ self.log(f"Generated sigma sequences. Karras: {self.sigmas_karras}, Exponential: {self.sigmas_exponential}")
595
+
596
+ if self.sigmas_karras is None:
597
+ raise ValueError(f"Sigmas Karras:{self.sigmas_karras} Failed to generate or assign sigmas correctly.")
598
+ if self.sigmas_exponential is None:
599
+ raise ValueError(f"Sigmas Exponential: {self.sigmas_exponential} Failed to generate or assign sigmas correctly.")
600
+ self.sigmas_karras = torch.zeros(self.steps).to(self.device)
601
+ self.sigmas_exponential = torch.zeros(self.steps).to(self.device)
602
+ try:
603
+ pass
604
+ except Exception as e:
605
+ self.log(f"Error generating sigmas: {e}")
606
+
607
+ if len(self.sigmas_karras) < len(self.sigmas_exponential):
608
+ # Pad `sigmas_karras` with the last value
609
+ padding_karras = torch.full((len(self.sigmas_exponential) - len(self.sigmas_karras),), self.sigmas_karras[-1]).to(self.sigmas_karras.self.device)
610
+ self.sigmas_karras = torch.cat([self.sigmas_karras, padding_karras])
611
+ elif len(self.sigmas_karras) > len(self.sigmas_exponential):
612
+ # Pad `sigmas_exponential` with the last value
613
+ padding_exponential = torch.full((len(self.sigmas_karras) - len(self.sigmas_exponential),), self.sigmas_exponential[-1]).to(self.sigmas_exponential.device)
614
+ self.sigmas_exponential = torch.cat([self.sigmas_exponential, padding_exponential])
615
+
616
+ # Now it's safe to compute sigs
617
+ start = math.log(self.sigma_max)
618
+ end = math.log(self.sigma_min)
619
+ self.sigs = torch.linspace(start, end, self.steps, device=self.device).exp()
620
+
621
+ # Ensure sigs contain valid values before using them
622
+ if torch.any(self.sigs > 0):
623
+ self.sigma_min, self.sigma_max = self.sigs[self.sigs > 0].min(), self.sigs.max()
624
+ else:
625
+ # If sigs are all invalid, set a safe fallback
626
+ self.sigma_min, self.sigma_max = self.min_threshold, self.min_threshold
627
+ self.log(f"Debugging Warning: No positive sigma values found! Setting fallback sigma_min={self.sigma_min}, sigma_max={self.sigma_max}")
628
+
629
+ return self.sigmas_karras, self.sigmas_exponential, self.sigs
630
+
631
+ def config_values(self):
632
+
633
+
634
+ #Ensures sigma_min is always less than sigma_max for edge cases
635
+ if self.sigma_min >= self.sigma_max:
636
+ correction_factor = random.uniform(0.01, 0.99)
637
+ old_sigma_min = self.sigma_min
638
+ self.sigma_min = self.sigma_max * correction_factor
639
+ self.log(f"[Correction] sigma_min ({old_sigma_min}) was >= sigma_max ({self.sigma_max}). Adjusted sigma_min to {self.sigma_min} using correction factor {correction_factor}.")
640
+
641
+ self.log(f"Final sigmas: sigma_min={self.sigma_min}, sigma_max={self.sigma_max}")
642
+
643
+ # Other configs
644
+ self.sharpen_mode = self.settings.get('sharpen_mode', 'full')
645
+
646
+ if self.sigma_auto_enabled:
647
+ if self.sigma_auto_mode not in ["sigma_min", "sigma_max"]:
648
+ raise ValueError(f"[Config Error] Invalid sigma_auto_mode: {self.sigma_auto_mode}. Must be 'sigma_min' or 'sigma_max'.")
649
+
650
+ if self.sigma_auto_mode == "sigma_min":
651
+ self.sigma_min = self.sigma_max / self.sigma_scale_factor
652
+ self.log(f"[Auto Sigma Min] sigma_min set to {self.sigma_min} using scale factor {self.sigma_scale_factor}")
653
+
654
+ elif self.sigma_auto_mode == "sigma_max":
655
+ self.sigma_max = self.sigma_min * self.sigma_scale_factor
656
+ self.log(f"[Auto Sigma Max] sigma_max set to {self.sigma_max} using scale factor {self.sigma_scale_factor} and using a multiplier of {sigma_max_multipier} to account for smoother transitions")
657
+
658
+ # Always apply min_threshold AFTER auto scaling
659
+ self.min_threshold = random.uniform(1e-5, 5e-5)
660
+
661
+ if self.sigma_min < self.min_threshold:
662
+ self.log(f"[Threshold Enforcement] sigma_min was too low: {self.sigma_min} < min_threshold {self.min_threshold}")
663
+ self.sigma_min = self.min_threshold
664
+
665
+ if self.sigma_max < self.min_threshold:
666
+ self.log(f"[Threshold Enforcement] sigma_max was too low: {self.sigma_max} < min_threshold {self.min_threshold}")
667
+ self.sigma_max = self.min_threshold
668
+
669
+ self.early_stopping_method = self.settings.get("early_stopping_method", "mean")
670
+ valid_methods = ['mean', 'max', 'sum']
671
+ if self.early_stopping_method not in valid_methods:
672
+ self.log(f"[Config Correction] Invalid early_stopping_method: {self.early_stopping_method}. Defaulting to 'mean'.")
673
+ self.early_stopping_method = 'mean'
674
+
675
+ def prepass_compute_sigmas(self, skip_prepass = False)->torch.Tensor:
676
+ if self.steps is None:
677
+ raise ValueError("Number of steps must be provided.")
678
+ if isinstance(self.device, str):
679
+ self.device = torch.device(self.device)
680
+ self.config_values()
681
+ self.generate_sigmas_schedule()
682
+
683
+ self.predicted_stop_step = self.steps if None else self.original_steps
684
+ if self.N > len(self.sigs):
685
+ self.N = len(self.sigs)
686
+ self.log(f"[Sharpening Notice] Requested last {self.N} steps exceeds sequence length. Using entire sequence instead.")
687
+ self.min_visual_sigma = self.settings.get('min_visual_sigma', 10)
688
+ self.visual_sigma = max(0.8, self.sigma_min * self.min_visual_sigma)
689
+ self.safety_minimum_stop_step = self.settings.get('safety_minimum_stop_step', 10)
690
+ self.blend_sigma_sequence(sigs = self.sigs, sigmas_karras=self.sigmas_karras, sigmas_exponential=self.sigmas_exponential, pre_pass = True)
691
+ if torch.isnan(self.sigs).any() or torch.isinf(self.sigs).any():
692
+ raise ValueError("Invalid sigma values detected (NaN or Inf).")
693
+ final_steps = self.sigs[:self.predicted_stop_step].to(self.device)
694
+ # Store the results for later use in compute_sigmas
695
+ self.final_steps = final_steps
696
+ self.final_sigmas_karras = self.sigmas_karras
697
+ self.final_sigmas_exponential = self.sigmas_exponential
698
+ self.log(f" Final Steps = {self.final_steps}. Predicted_stop_step = {self.predicted_stop_step}. Original requested steps = {self.steps}")
699
+ self.log(f"final sigmas karras: {self.final_sigmas_karras}")
700
+
701
+
702
    def compute_sigmas(self) -> torch.Tensor:
        """
        Scheduler function that blends sigma sequences using Karras and Exponential methods with adaptive parameters.

        Parameters (read from self.settings, after per-key randomization):
            sigma_min (float): Minimum sigma value.
            sigma_max (float): Maximum sigma value.
            start_blend (float): Initial blend factor for dynamic blending.
            end_blend (float): Final blend factor for dynamic blending.
            sharpness (float): Sharpening factor to be applied adaptively.
            early_stopping_threshold (float): Threshold to trigger early stopping.
            initial_step_size (float): Initial step size for adaptive step size calculation.
            final_step_size (float): Final step size for adaptive step size calculation.
            initial_noise_scale (float): Initial noise scale factor.
            final_noise_scale (float): Final noise scale factor.
            smooth_blend_factor (float): Sharpness of the blend sigmoid.
            step_size_factor (float): Adjust to compensate for oversmoothing.
            noise_scale_factor (float): Adjust to provide more variation.
            rho (float): Karras rho exponent.

        Returns:
            torch.Tensor: A tensor of blended sigma values (on self.device).
        """
        # Keys eligible for per-run randomization; each resolved value is
        # mirrored onto the instance via setattr for later attribute access.
        acceptable_keys = [
            "sigma_min", "sigma_max", "start_blend", "end_blend", "sharpness",
            "early_stopping_threshold", "initial_step_size",
            "final_step_size", "initial_noise_scale", "final_noise_scale",
            "smooth_blend_factor", "step_size_factor", "noise_scale_factor", "rho"
        ]

        for key in acceptable_keys:
            default_val = self.settings[key]
            value = self.get_random_or_default(key, default_val)
            setattr(self, key, value)

        self.log(f"Using device: {self.device}")
        self.config_values()
        self.generate_sigmas_schedule()
        # Prefer the (possibly truncated) schedules saved by the prepass.
        if hasattr(self, 'final_sigmas_karras'):
            self.sigs = torch.zeros_like(self.final_sigmas_karras).to(self.device)
        else:
            self.sigs = torch.zeros_like(self.sigmas_karras).to(self.device)

        self.blend_sigma_sequence(
            sigs=self.sigs,
            sigmas_karras=self.final_sigmas_karras if hasattr(self, 'final_sigmas_karras') else self.sigmas_karras,
            sigmas_exponential=self.final_sigmas_exponential if hasattr(self, 'final_sigmas_exponential') else self.sigmas_exponential,
            pre_pass=False
        )
        self.sigma_variance = torch.var(self.sigs).item()
        if self.sharpen_mode in ['last_n', 'both']:
            # NOTE(review): self.sigma_variance_threshold is set elsewhere on
            # the instance — confirm it exists before this path runs.
            if self.sigma_variance < self.sigma_variance_threshold:
                # Apply full sharpening
                self.sharpen_mask = torch.where(self.sigs < self.sigma_min * 1.5, self.sharpness, 1.0).to(self.device)
                sharpen_indices = torch.where(self.sharpen_mask < 1.0)[0].tolist()
                self.sigs = self.sigs * self.sharpen_mask
                self.log(f"[Sharpen Mask] Full sharpening applied (low variance). Steps: {sharpen_indices}")
            else:
                # Apply sharpening only to the last N steps
                recent_sigs = self.sigs[-self.N:]
                sharpen_mask = torch.where(recent_sigs < self.sigma_min * 1.5, self.sharpness, 1.0).to(self.device)
                sharpen_indices = torch.where(sharpen_mask < 1.0)[0].tolist()
                self.sigs[-self.N:] = recent_sigs * sharpen_mask

                # Now loop per step if desired (safely inside this block)
                # NOTE(review): the loop below multiplies by self.sharpness a
                # second time after the masked multiply above, so qualifying
                # steps get sharpness applied twice — confirm intended.
                for j in range(len(self.sigs) - self.N, len(self.sigs)):
                    if self.sigs[j] < self.sigma_min * 1.5:
                        old_value = self.sigs[j].item()
                        self.sigs[j] = self.sigs[j] * self.sharpness
                        self.log(f"[Sharpening] Step {j+1}: Applied sharpening. Sigma changed from {old_value:.6f} to {self.sigs[j].item():.6f}")
                    else:
                        self.log(f"[Sharpening] Step {j+1}: No sharpening applied. Sigma: {self.sigs[j].item():.6f}")

        if self.sharpen_mode in ['full', 'both']:
            # Optional: Additional full sharpening (if needed)
            self.sharpen_mask = torch.where(self.sigs < self.sigma_min * 1.5, self.sharpness, 1.0).to(self.device)
            sharpen_indices = torch.where(self.sharpen_mask < 1.0)[0].tolist()
            self.sigs = self.sigs * self.sharpen_mask
            self.log(f"[Sharpen Mask] Full sharpening applied at steps: {sharpen_indices}")

        return self.sigs.to(self.device)
sd_simple_kes_v2/validate_config.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Optional, Dict, Any

# Maps every accepted spelling of a randomization type (full name, common
# abbreviation, single letter, upper or lower case) onto its canonical name.
RANDOMIZATION_TYPE_ALIASES = {
    # Asymmetric
    'asymmetric': 'asymmetric', 'assym': 'asymmetric', 'a': 'asymmetric', 'asym': 'asymmetric', 'A': 'asymmetric',
    # Symmetric
    'symmetric': 'symmetric', 'sym': 'symmetric', 's': 'symmetric', 'S': 'symmetric',
    # Logarithmic
    'logarithmic': 'logarithmic', 'log': 'logarithmic', 'l': 'logarithmic', 'L': 'logarithmic',
    # Exponential
    'exponential': 'exponential', 'exp': 'exponential', 'e': 'exponential', 'E': 'exponential',
}

DEFAULT_RANDOMIZATION_TYPE = 'asymmetric'
DEFAULT_RANDOMIZATION_PERCENT = 0.2

# Fallback value for every tunable scheduler parameter.  For each key the
# validator also guarantees the companion keys `<key>_rand`,
# `<key>_enable_randomization_type`, `<key>_randomization_type`,
# `<key>_randomization_percent`, `<key>_rand_min` and `<key>_rand_max`.
BASE_DEFAULTS = {
    'sigma_min': 0.05,
    'sigma_max': 27.5,
    'start_blend': 0.1,
    'end_blend': 0.5,
    'sharpness': 1.0,
    'early_stopping_threshold': 0.01,
    'initial_step_size': 0.9,
    'final_step_size': 0.2,
    'initial_noise_scale': 1.25,
    'final_noise_scale': 0.8,
    'smooth_blend_factor': 9.0,
    'step_size_factor': 0.8,
    'noise_scale_factor': 0.8,
    'rho': 8.0
}


def validate_config(config: Dict[str, Any], logger: Optional[Any] = None) -> Dict[str, Any]:
    """Fill in and sanity-check a scheduler configuration mapping.

    For each parameter in ``BASE_DEFAULTS`` this ensures the base value, its
    ``*_rand`` / ``*_enable_randomization_type`` boolean flags, its
    ``*_randomization_type`` (normalized through
    ``RANDOMIZATION_TYPE_ALIASES``), its ``*_randomization_percent`` and its
    ``*_rand_min`` / ``*_rand_max`` bounds all exist and are usable.

    Args:
        config: User-supplied configuration.  Not mutated; a shallow copy is
            corrected and returned.
        logger: Optional object exposing ``log(message)``.  Falls back to
            ``print`` when omitted.

    Returns:
        A new dict containing ``config`` plus every corrected/filled entry.
    """
    updated_config = config.copy()

    def log(message: str) -> None:
        # Route through the caller's logger when available; print otherwise.
        if logger:
            logger.log(message)
        else:
            print(message)

    for key, base_value in BASE_DEFAULTS.items():
        # Step 1: base value itself.
        if key not in updated_config:
            updated_config[key] = base_value
            log(f"[Config Correction] {key} missing. Set to base default: {base_value}")

        # _rand flag must exist and be a real boolean (a missing key yields
        # None from .get(), which fails the isinstance check too).
        rand_flag = f"{key}_rand"
        if not isinstance(updated_config.get(rand_flag), bool):
            updated_config[rand_flag] = False
            log(f"[Config Correction] {rand_flag} missing or invalid. Set to False.")

        # _enable_randomization_type flag must exist and be a real boolean.
        randomization_flag = f"{key}_enable_randomization_type"
        if not isinstance(updated_config.get(randomization_flag), bool):
            updated_config[randomization_flag] = False
            log(f"[Config Correction] {randomization_flag} missing or invalid. Set to False.")

        # randomization_type: fill when missing, otherwise normalize.
        randomization_type_key = f"{key}_randomization_type"
        if randomization_type_key not in updated_config:
            updated_config[randomization_type_key] = DEFAULT_RANDOMIZATION_TYPE
            log(f"[Config Correction] {randomization_type_key} missing. Set to '{DEFAULT_RANDOMIZATION_TYPE}'.")
        else:
            # Bug fix: RANDOMIZATION_TYPE_ALIASES was declared but never
            # applied, so aliases ('log', 'S', ...) and invalid values passed
            # through unchecked.  Resolve the alias (case-insensitively as a
            # fallback) and reset unrecognized values to the default.
            raw = updated_config[randomization_type_key]
            canonical = None
            if isinstance(raw, str):
                canonical = RANDOMIZATION_TYPE_ALIASES.get(raw)
                if canonical is None:
                    canonical = RANDOMIZATION_TYPE_ALIASES.get(raw.lower())
            if canonical is None:
                log(f"[Config Correction] {randomization_type_key} value {raw!r} not recognized. Set to '{DEFAULT_RANDOMIZATION_TYPE}'.")
                canonical = DEFAULT_RANDOMIZATION_TYPE
            updated_config[randomization_type_key] = canonical

        # randomization_percent: fill when missing.
        randomization_percent_key = f"{key}_randomization_percent"
        if randomization_percent_key not in updated_config:
            updated_config[randomization_percent_key] = DEFAULT_RANDOMIZATION_PERCENT
            log(f"[Config Correction] {randomization_percent_key} missing. Set to {DEFAULT_RANDOMIZATION_PERCENT}.")

        # _rand_min / _rand_max: derive from the base value when absent.
        min_key = f"{key}_rand_min"
        max_key = f"{key}_rand_max"
        percent = updated_config[randomization_percent_key]

        if min_key not in updated_config:
            updated_config[min_key] = updated_config[key] * (1 - percent)
            log(f"[Config Correction] {min_key} missing. Auto-calculated from base.")

        if max_key not in updated_config:
            updated_config[max_key] = updated_config[key] * (1 + percent)
            log(f"[Config Correction] {max_key} missing. Auto-calculated from base.")

        # Robustness: a negative base value or user-supplied inverted bounds
        # can leave min > max, which would break downstream uniform sampling.
        lo, hi = updated_config[min_key], updated_config[max_key]
        if isinstance(lo, (int, float)) and isinstance(hi, (int, float)) and lo > hi:
            updated_config[min_key], updated_config[max_key] = hi, lo
            log(f"[Config Correction] {min_key} > {max_key}. Values swapped.")

    log("[Config Validation] Config validated and missing values filled successfully.")
    return updated_config