dikdimon committed on
Commit
d1e6fe8
·
verified ·
1 Parent(s): add8787

Upload sd_simple_kes_v1 using SD-Hub

Browse files
Files changed (31) hide show
  1. sd_simple_kes_v1/__pycache__/get_sigmas.cpython-310.pyc +0 -0
  2. sd_simple_kes_v1/__pycache__/simple_kes_v1.cpython-310.pyc +0 -0
  3. sd_simple_kes_v1/__pycache__/validate_config.cpython-310.pyc +0 -0
  4. sd_simple_kes_v1/get_sigmas.py +32 -0
  5. sd_simple_kes_v1/image_generation_data/generation_log_20250706_075558.txt +32 -0
  6. sd_simple_kes_v1/image_generation_data/generation_log_20250706_084556.txt +24 -0
  7. sd_simple_kes_v1/image_generation_data/generation_log_20250706_084826.txt +24 -0
  8. sd_simple_kes_v1/image_generation_data/generation_log_20250706_094358.txt +24 -0
  9. sd_simple_kes_v1/image_generation_data/generation_log_20250706_094514.txt +24 -0
  10. sd_simple_kes_v1/image_generation_data/generation_log_20250706_094707.txt +24 -0
  11. sd_simple_kes_v1/image_generation_data/generation_log_20250706_095038.txt +24 -0
  12. sd_simple_kes_v1/image_generation_data/generation_log_20250706_095104.txt +24 -0
  13. sd_simple_kes_v1/image_generation_data/generation_log_20250712_061628.txt +24 -0
  14. sd_simple_kes_v1/image_generation_data/generation_log_20250712_061655.txt +24 -0
  15. sd_simple_kes_v1/image_generation_data/generation_log_20250712_061722.txt +24 -0
  16. sd_simple_kes_v1/image_generation_data/generation_log_20250712_061750.txt +24 -0
  17. sd_simple_kes_v1/image_generation_data/generation_log_20250712_061817.txt +24 -0
  18. sd_simple_kes_v1/image_generation_data/generation_log_20250712_061845.txt +24 -0
  19. sd_simple_kes_v1/image_generation_data/generation_log_20250726_133513.txt +24 -0
  20. sd_simple_kes_v1/image_generation_data/generation_log_20250813_084451.txt +24 -0
  21. sd_simple_kes_v1/image_generation_data/generation_log_20250813_084601.txt +24 -0
  22. sd_simple_kes_v1/image_generation_data/generation_log_20250813_084703.txt +24 -0
  23. sd_simple_kes_v1/image_generation_data/generation_log_20250813_084836.txt +24 -0
  24. sd_simple_kes_v1/image_generation_data/generation_log_20250813_085028.txt +24 -0
  25. sd_simple_kes_v1/image_generation_data/generation_log_20250813_085243.txt +24 -0
  26. sd_simple_kes_v1/image_generation_data/generation_log_20250813_085423.txt +24 -0
  27. sd_simple_kes_v1/image_generation_data/generation_log_20250813_090056.txt +24 -0
  28. sd_simple_kes_v1/kes_config/default_config.yaml +123 -0
  29. sd_simple_kes_v1/prompt_parser.py +1599 -0
  30. sd_simple_kes_v1/simple_kes_v1.py +457 -0
  31. sd_simple_kes_v1/validate_config.py +65 -0
sd_simple_kes_v1/__pycache__/get_sigmas.cpython-310.pyc ADDED
Binary file (1.26 kB). View file
 
sd_simple_kes_v1/__pycache__/simple_kes_v1.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
sd_simple_kes_v1/__pycache__/validate_config.cpython-310.pyc ADDED
Binary file (1.81 kB). View file
 
sd_simple_kes_v1/get_sigmas.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import torch
3
+ from torch import linspace, tensor
4
+
5
+ # Source file adapted from the diffusers library; modified to remove a warning printed to the console:
6
+ '''
7
+ UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
8
+ min_inv_rho = tensor(sigma_min, device=device) ** (1 / rho)
9
+
10
+ '''
11
+ def append_zero(x):
12
+ return torch.cat([x, x.new_zeros([1])])
13
+
14
+ def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
15
+ """Constructs the noise schedule of Karras et al. (2022)."""
16
+ ramp = linspace(0, 1, n, device=device)
17
+ #min_inv_rho = tensor(sigma_min, device=device) ** (1 / rho)
18
+ #max_inv_rho = tensor(sigma_max, device=device) ** (1 / rho)
19
+ def _to_tensor(val, device):
20
+ return val.to(device) if isinstance(val, torch.Tensor) else torch.tensor(val, device=device)
21
+
22
+ min_inv_rho = _to_tensor(sigma_min, device) ** (1 / rho)
23
+ max_inv_rho = _to_tensor(sigma_max, device) ** (1 / rho)
24
+ sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
25
+ return append_zero(sigmas).to(device)
26
+
27
+
28
+
29
+ def get_sigmas_exponential(n, sigma_min, sigma_max, device='cpu'):
30
+ """Constructs an exponential noise schedule."""
31
+ sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), n, device=device).exp()
32
+ return append_zero(sigmas)
sd_simple_kes_v1/image_generation_data/generation_log_20250706_075558.txt ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 40.47247420824197 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 40.47247420824197
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 5.3338640899006275 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 5.3338640899006275
18
+ [Auto Sigma Min] sigma_min set to 0.20236237104120985 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.20236237347126007, sigma_max=40.47247314453125
21
+ Generated sigma sequences. Karras: tensor([40.4725, 36.0006, 31.9388, 28.2574, 24.9286, 21.9256, 19.2235, 16.7984,
22
+ 14.6280, 12.6912, 10.9681, 9.4401, 8.0898, 6.9009, 5.8579, 4.9469,
23
+ 4.1545, 3.4684, 2.8774, 2.3710, 1.9395, 1.5741, 1.2668, 1.0101,
24
+ 0.7973, 0.6225, 0.4802, 0.3656, 0.2742, 0.2024, 0.0000],
25
+ device='cuda:0'), Exponential: tensor([40.4725, 33.7143, 28.0846, 23.3949, 19.4884, 16.2342, 13.5234, 11.2652,
26
+ 9.3841, 7.8171, 6.5118, 5.4244, 4.5187, 3.7641, 3.1356, 2.6120,
27
+ 2.1758, 1.8125, 1.5099, 1.2577, 1.0477, 0.8728, 0.7270, 0.6056,
28
+ 0.5045, 0.4203, 0.3501, 0.2916, 0.2429, 0.2024, 0.0000],
29
+ device='cuda:0')
30
+ [Progress Initialized] Created progress tensor with 30 steps (excluding terminal step) on device: cuda
31
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([30]) on device: cuda
32
+ [Sharpen Mask] Sharpening applied at steps: [23, 24, 25, 26, 27, 28, 29, 30]
sd_simple_kes_v1/image_generation_data/generation_log_20250706_084556.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 20.31486888381647 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 20.31486888381647
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 6.341474071879316 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 6.341474071879316
18
+ [Auto Sigma Min] sigma_min set to 0.10157434441908235 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.1015743613243103, sigma_max=20.31486701965332
21
+ Generated sigma sequences. Karras: tensor([20.3149, 14.9509, 10.8332, 7.7153, 5.3903, 3.6859, 2.4600, 1.5972, 1.0048, 0.6094, 0.3541, 0.1956, 0.1016, 0.0000], device='cuda:0'), Exponential: tensor([20.3149, 13.0636, 8.4006, 5.4020, 3.4738, 2.2338, 1.4365, 0.9237, 0.5940, 0.3820, 0.2456, 0.1580, 0.1016, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250706_084826.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 47.511735883659846 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 47.511735883659846
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 5.411802739400299 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 5.411802739400299
18
+ [Auto Sigma Min] sigma_min set to 0.23755867941829922 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.23755867779254913, sigma_max=47.51173400878906
21
+ Generated sigma sequences. Karras: tensor([47.5117, 35.5816, 26.2156, 18.9651, 13.4400, 9.3042, 6.2706, 4.0968, 2.5811, 1.5574, 0.8921, 0.4793, 0.2376, 0.0000], device='cuda:0'), Exponential: tensor([47.5117, 30.5526, 19.6470, 12.6341, 8.1244, 5.2244, 3.3596, 2.1604, 1.3893, 0.8934, 0.5745, 0.3694, 0.2376, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250706_094358.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 22.70747556206838 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 22.70747556206838
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 7.114235966440336 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 7.114235966440336
18
+ [Auto Sigma Min] sigma_min set to 0.11353737781034191 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.11353737860918045, sigma_max=22.707477569580078
21
+ Generated sigma sequences. Karras: tensor([22.7075, 16.5162, 11.8351, 8.3430, 5.7759, 3.9193, 2.6007, 1.6829, 1.0586, 0.6447, 0.3783, 0.2126, 0.1135, 0.0000], device='cuda:0'), Exponential: tensor([22.7075, 14.6021, 9.3900, 6.0383, 3.8829, 2.4969, 1.6057, 1.0325, 0.6640, 0.4270, 0.2746, 0.1766, 0.1135, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250706_094514.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 30.093752214132895 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 30.093752214132895
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 5.278771579706032 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 5.278771579706032
18
+ [Auto Sigma Min] sigma_min set to 0.15046876107066448 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.15046875178813934, sigma_max=30.093753814697266
21
+ Generated sigma sequences. Karras: tensor([30.0938, 22.6017, 16.6984, 12.1118, 8.6041, 5.9691, 4.0299, 2.6360, 1.6614, 1.0018, 0.5725, 0.3061, 0.1505, 0.0000], device='cuda:0'), Exponential: tensor([30.0938, 19.3519, 12.4443, 8.0024, 5.1460, 3.3091, 2.1279, 1.3684, 0.8799, 0.5659, 0.3639, 0.2340, 0.1505, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250706_094707.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 15.16043243738741 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 15.16043243738741
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 7.529236701684537 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 7.529236701684537
18
+ [Auto Sigma Min] sigma_min set to 0.07580216218693706 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.07580216228961945, sigma_max=15.160433769226074
21
+ Generated sigma sequences. Karras: tensor([15.1604, 10.9661, 7.8176, 5.4849, 3.7815, 2.5572, 1.6926, 1.0938, 0.6881, 0.4199, 0.2475, 0.1402, 0.0758, 0.0000], device='cuda:0'), Exponential: tensor([15.1604, 9.7490, 6.2691, 4.0314, 2.5924, 1.6671, 1.0720, 0.6894, 0.4433, 0.2851, 0.1833, 0.1179, 0.0758, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250706_095038.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 32.577242192334715 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 32.577242192334715
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 8.06934320596871 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 8.06934320596871
18
+ [Auto Sigma Min] sigma_min set to 0.16288621096167358 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.16288620233535767, sigma_max=32.577239990234375
21
+ Generated sigma sequences. Karras: tensor([32.5772, 23.4117, 16.5892, 11.5756, 7.9427, 5.3503, 3.5315, 2.2790, 1.4342, 0.8774, 0.5200, 0.2972, 0.1629, 0.0000], device='cuda:0'), Exponential: tensor([32.5772, 20.9489, 13.4713, 8.6628, 5.5706, 3.5822, 2.3036, 1.4813, 0.9526, 0.6126, 0.3939, 0.2533, 0.1629, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250706_095104.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 48.89761928677526 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 48.89761928677526
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 6.552977828605041 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 6.552977828605041
18
+ [Auto Sigma Min] sigma_min set to 0.2444880964338763 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.24448810517787933, sigma_max=48.89762496948242
21
+ Generated sigma sequences. Karras: tensor([48.8976, 35.8633, 25.9015, 18.3913, 12.8144, 8.7423, 5.8245, 3.7778, 2.3763, 1.4429, 0.8408, 0.4668, 0.2445, 0.0000], device='cuda:0'), Exponential: tensor([48.8976, 31.4438, 20.2201, 13.0026, 8.3614, 5.3768, 3.4576, 2.2234, 1.4298, 0.9194, 0.5912, 0.3802, 0.2445, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250712_061628.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 30.331830824484825 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 30.331830824484825
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 7.2990362567959135 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 7.2990362567959135
18
+ [Auto Sigma Min] sigma_min set to 0.15165915412242412 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.15165916085243225, sigma_max=30.33182716369629
21
+ Generated sigma sequences. Karras: tensor([30.3318, 26.3462, 22.8210, 19.7105, 16.9729, 14.5700, 12.4665, 10.6304, 9.0326, 7.6465, 6.4479, 5.4151, 4.5283, 3.7698, 3.1235, 2.5752, 2.1120, 1.7226, 1.3968, 1.1256, 0.9011, 0.7163, 0.5653, 0.4425, 0.3435, 0.2642, 0.2012, 0.1517, 0.0000], device='cuda:0'), Exponential: tensor([30.3318, 24.9273, 20.4858, 16.8356, 13.8358, 11.3706, 9.3446, 7.6795, 6.3112, 5.1867, 4.2625, 3.5030, 2.8788, 2.3659, 1.9443, 1.5979, 1.3132, 1.0792, 0.8869, 0.7289, 0.5990, 0.4923, 0.4046, 0.3325, 0.2732, 0.2246, 0.1845, 0.1517, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 28 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([28]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [21, 22, 23, 24, 25, 26, 27, 28]
sd_simple_kes_v1/image_generation_data/generation_log_20250712_061655.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 43.23162254607824 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 43.23162254607824
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 6.000206094407517 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 6.000206094407517
18
+ [Auto Sigma Min] sigma_min set to 0.2161581127303912 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.21615813672542572, sigma_max=43.231624603271484
21
+ Generated sigma sequences. Karras: tensor([43.2316, 37.8945, 33.1181, 28.8543, 25.0581, 21.6876, 18.7037, 16.0700, 13.7529, 11.7212, 9.9460, 8.4008, 7.0610, 5.9042, 4.9099, 4.0592, 3.3350, 2.7219, 2.2057, 1.7738, 1.4148, 1.1186, 0.8759, 0.6788, 0.5201, 0.3936, 0.2939, 0.2162, 0.0000], device='cuda:0'), Exponential: tensor([43.2316, 35.5286, 29.1981, 23.9956, 19.7201, 16.2063, 13.3187, 10.9456, 8.9953, 7.3925, 6.0753, 4.9928, 4.1032, 3.3721, 2.7712, 2.2775, 1.8717, 1.5382, 1.2641, 1.0389, 0.8538, 0.7016, 0.5766, 0.4739, 0.3894, 0.3201, 0.2630, 0.2162, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 28 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([28]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [21, 22, 23, 24, 25, 26, 27, 28]
sd_simple_kes_v1/image_generation_data/generation_log_20250712_061722.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 45.36714554920688 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 45.36714554920688
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 7.617959675984846 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 7.617959675984846
18
+ [Auto Sigma Min] sigma_min set to 0.22683572774603442 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.2268357276916504, sigma_max=45.36714172363281
21
+ Generated sigma sequences. Karras: tensor([45.3671, 39.3326, 34.0079, 29.3210, 25.2056, 21.6016, 18.4540, 15.7127, 13.3322, 11.2716, 9.4934, 7.9642, 6.6536, 5.5346, 4.5829, 3.7766, 3.0966, 2.5255, 2.0483, 1.6514, 1.3231, 1.0531, 0.8322, 0.6528, 0.5080, 0.3919, 0.2996, 0.2268, 0.0000], device='cuda:0'), Exponential: tensor([45.3671, 37.2836, 30.6404, 25.1809, 20.6942, 17.0069, 13.9766, 11.4862, 9.4396, 7.7577, 6.3754, 5.2394, 4.3059, 3.5387, 2.9081, 2.3900, 1.9641, 1.6142, 1.3265, 1.0902, 0.8959, 0.7363, 0.6051, 0.4973, 0.4087, 0.3359, 0.2760, 0.2268, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 28 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([28]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [21, 22, 23, 24, 25, 26, 27, 28]
sd_simple_kes_v1/image_generation_data/generation_log_20250712_061750.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 11.087427883010665 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 11.087427883010665
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 6.447578289721838 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 6.447578289721838
18
+ [Auto Sigma Min] sigma_min set to 0.05543713941505333 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.05543714761734009, sigma_max=11.087427139282227
21
+ Generated sigma sequences. Karras: tensor([11.0874, 9.6852, 8.4358, 7.3254, 6.3411, 5.4709, 4.7039, 4.0298, 3.4392, 2.9234, 2.4746, 2.0854, 1.7493, 1.4601, 1.2124, 1.0012, 0.8219, 0.6706, 0.5435, 0.4374, 0.3493, 0.2767, 0.2173, 0.1691, 0.1302, 0.0992, 0.0746, 0.0554, 0.0000], device='cuda:0'), Exponential: tensor([11.0874, 9.1119, 7.4883, 6.1540, 5.0575, 4.1564, 3.4158, 2.8072, 2.3070, 1.8959, 1.5581, 1.2805, 1.0523, 0.8648, 0.7107, 0.5841, 0.4800, 0.3945, 0.3242, 0.2664, 0.2190, 0.1799, 0.1479, 0.1215, 0.0999, 0.0821, 0.0675, 0.0554, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 28 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([28]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [21, 22, 23, 24, 25, 26, 27, 28]
sd_simple_kes_v1/image_generation_data/generation_log_20250712_061817.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 44.86829543350951 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 44.86829543350951
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 6.254884080290302 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 6.254884080290302
18
+ [Auto Sigma Min] sigma_min set to 0.22434147716754754 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.224341481924057, sigma_max=44.86829376220703
21
+ Generated sigma sequences. Karras: tensor([44.8683, 39.2502, 34.2352, 29.7700, 25.8046, 22.2927, 19.1914, 16.4610, 14.0646, 11.9684, 10.1412, 8.5543, 7.1814, 5.9986, 4.9840, 4.1176, 3.3814, 2.7592, 2.2361, 1.7989, 1.4360, 1.1366, 0.8915, 0.6925, 0.5322, 0.4043, 0.3032, 0.2243, 0.0000], device='cuda:0'), Exponential: tensor([44.8683, 36.8737, 30.3035, 24.9040, 20.4666, 16.8199, 13.8229, 11.3599, 9.3358, 7.6724, 6.3053, 5.1818, 4.2585, 3.4997, 2.8762, 2.3637, 1.9425, 1.5964, 1.3120, 1.0782, 0.8861, 0.7282, 0.5984, 0.4918, 0.4042, 0.3322, 0.2730, 0.2243, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 28 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([28]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [21, 22, 23, 24, 25, 26, 27, 28]
sd_simple_kes_v1/image_generation_data/generation_log_20250712_061845.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 32.700045187476825 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 32.700045187476825
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 7.762954933618958 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 7.762954933618958
18
+ [Auto Sigma Min] sigma_min set to 0.1635002259373841 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.16350023448467255, sigma_max=32.70004653930664
21
+ Generated sigma sequences. Karras: tensor([32.7000, 28.3276, 24.4735, 21.0845, 18.1119, 15.5112, 13.2421, 11.2678, 9.5550, 8.0736, 6.7965, 5.6991, 4.7593, 3.9576, 3.2762, 2.6993, 2.2130, 1.8049, 1.4640, 1.1806, 0.9462, 0.7535, 0.5959, 0.4678, 0.3644, 0.2815, 0.2156, 0.1635, 0.0000], device='cuda:0'), Exponential: tensor([32.7000, 26.8735, 22.0852, 18.1501, 14.9161, 12.2583, 10.0741, 8.2791, 6.8040, 5.5916, 4.5953, 3.7765, 3.1036, 2.5506, 2.0961, 1.7227, 1.4157, 1.1635, 0.9562, 0.7858, 0.6458, 0.5307, 0.4361, 0.3584, 0.2946, 0.2421, 0.1989, 0.1635, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 28 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([28]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [21, 22, 23, 24, 25, 26, 27, 28]
sd_simple_kes_v1/image_generation_data/generation_log_20250726_133513.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 37.06797949691645 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 37.06797949691645
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 9.460897838524227 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 9.460897838524227
18
+ [Auto Sigma Min] sigma_min set to 0.18533989748458224 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.18533989787101746, sigma_max=37.06798553466797
21
+ Generated sigma sequences. Karras: tensor([37.0680, 30.1958, 24.4862, 19.7622, 15.8705, 12.6790, 10.0742, 7.9588, 6.2498, 4.8768, 3.7801, 2.9094, 2.2227, 1.6847, 1.2663, 0.9434, 0.6962, 0.5086, 0.3676, 0.2626, 0.1853, 0.0000], device='cuda:0'), Exponential: tensor([37.0680, 28.4412, 21.8221, 16.7434, 12.8467, 9.8569, 7.5629, 5.8028, 4.4523, 3.4161, 2.6211, 2.0111, 1.5431, 1.1839, 0.9084, 0.6970, 0.5348, 0.4103, 0.3148, 0.2416, 0.1853, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 21 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([21]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [15, 16, 17, 18, 19, 20, 21]
sd_simple_kes_v1/image_generation_data/generation_log_20250813_084451.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 10.856943669535708 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 10.856943669535708
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 8.08819414456137 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 8.08819414456137
18
+ [Auto Sigma Min] sigma_min set to 0.05428471834767854 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.0542847216129303, sigma_max=10.856942176818848
21
+ Generated sigma sequences. Karras: tensor([10.8569, 7.8007, 5.5264, 3.8555, 2.6451, 1.7815, 1.1758, 0.7587, 0.4775, 0.2922, 0.1732, 0.0990, 0.0543, 0.0000], device='cuda:0'), Exponential: tensor([10.8569, 6.9816, 4.4895, 2.8870, 1.8565, 1.1938, 0.7677, 0.4937, 0.3175, 0.2041, 0.1313, 0.0844, 0.0543, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250813_084601.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 41.49888344830137 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 41.49888344830137
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 8.988685067558771 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 8.988685067558771
18
+ [Auto Sigma Min] sigma_min set to 0.20749441724150686 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.20749442279338837, sigma_max=41.49888229370117
21
+ Generated sigma sequences. Karras: tensor([41.4989, 29.5391, 20.7470, 14.3626, 9.7881, 6.5575, 4.3118, 2.7777, 1.7493, 1.0744, 0.6417, 0.3714, 0.2075, 0.0000], device='cuda:0'), Exponential: tensor([41.4989, 26.6860, 17.1606, 11.0352, 7.0962, 4.5632, 2.9344, 1.8870, 1.2134, 0.7803, 0.5018, 0.3227, 0.2075, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250813_084703.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 41.76420171760242 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 41.76420171760242
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 7.061781584185445 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 7.061781584185445
18
+ [Auto Sigma Min] sigma_min set to 0.20882100858801209 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.20882099866867065, sigma_max=41.764198303222656
21
+ Generated sigma sequences. Karras: tensor([41.7642, 30.3994, 21.7985, 15.3763, 10.6510, 7.2307, 4.7997, 3.1065, 1.9540, 1.1897, 0.6976, 0.3916, 0.2088, 0.0000], device='cuda:0'), Exponential: tensor([41.7642, 26.8566, 17.2703, 11.1057, 7.1416, 4.5924, 2.9532, 1.8991, 1.2212, 0.7853, 0.5050, 0.3247, 0.2088, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250813_084836.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 24.8344928619694 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 24.8344928619694
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 6.906271264950195 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 6.906271264950195
18
+ [Auto Sigma Min] sigma_min set to 0.124172464309847 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.12417245656251907, sigma_max=24.83449363708496
21
+ Generated sigma sequences. Karras: tensor([24.8345, 18.1169, 13.0184, 9.2006, 6.3841, 4.3402, 2.8840, 1.8677, 1.1748, 0.7147, 0.4183, 0.2341, 0.1242, 0.0000], device='cuda:0'), Exponential: tensor([24.8345, 15.9699, 10.2695, 6.6039, 4.2466, 2.7308, 1.7561, 1.1292, 0.7262, 0.4670, 0.3003, 0.1931, 0.1242, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250813_085028.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 32.11483148741722 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 32.11483148741722
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 8.54421983240806 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 8.54421983240806
18
+ [Auto Sigma Min] sigma_min set to 0.1605741574370861 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.16057415306568146, sigma_max=32.114830017089844
21
+ Generated sigma sequences. Karras: tensor([32.1148, 22.9607, 16.1922, 11.2503, 7.6909, 5.1653, 3.4022, 2.1934, 1.3808, 0.8465, 0.5038, 0.2899, 0.1606, 0.0000], device='cuda:0'), Exponential: tensor([32.1148, 20.6516, 13.2801, 8.5398, 5.4916, 3.5314, 2.2709, 1.4603, 0.9390, 0.6039, 0.3883, 0.2497, 0.1606, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250813_085243.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 25.33607589764972 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 25.33607589764972
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 5.103531052254309 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 5.103531052254309
18
+ [Auto Sigma Min] sigma_min set to 0.12668037948824862 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.126680389046669, sigma_max=25.336074829101562
21
+ Generated sigma sequences. Karras: tensor([25.3361, 19.1034, 14.1676, 10.3134, 7.3512, 5.1152, 3.4619, 2.2683, 1.4306, 0.8618, 0.4910, 0.2608, 0.1267, 0.0000], device='cuda:0'), Exponential: tensor([25.3361, 16.2925, 10.4769, 6.7372, 4.3324, 2.7860, 1.7915, 1.1521, 0.7408, 0.4764, 0.3063, 0.1970, 0.1267, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250813_085423.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 33.4340927821948 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 33.4340927821948
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 9.108573697102198 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 9.108573697102198
18
+ [Auto Sigma Min] sigma_min set to 0.167170463910974 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.1671704649925232, sigma_max=33.4340934753418
21
+ Generated sigma sequences. Karras: tensor([33.4341, 23.7717, 16.6789, 11.5357, 7.8554, 5.2594, 3.4568, 2.2265, 1.4023, 0.8617, 0.5151, 0.2986, 0.1672, 0.0000], device='cuda:0'), Exponential: tensor([33.4341, 21.4999, 13.8256, 8.8906, 5.7171, 3.6764, 2.3641, 1.5203, 0.9776, 0.6287, 0.4043, 0.2600, 0.1672, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/image_generation_data/generation_log_20250813_090056.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [Config Validation] Config validated and missing values filled successfully.
2
+ [Randomization] sigma_min: No randomization applied. Using default value: 0.13757067353874633
3
+ [Random Range] sigma_max: Picked random value 27.46179158324268 between 10 and 50
4
+ [Randomization] sigma_max: Applied min/max randomization. Final value: 27.46179158324268
5
+ [Randomization] start_blend: No randomization applied. Using default value: 0.10237033381422371
6
+ [Randomization] end_blend: No randomization applied. Using default value: 0.5627048166151302
7
+ [Randomization] sharpness: No randomization applied. Using default value: 0.9070742264431869
8
+ [Randomization] early_stopping_threshold: No randomization applied. Using default value: 0.01
9
+ [Randomization] initial_step_size: No randomization applied. Using default value: 0.9
10
+ [Randomization] final_step_size: No randomization applied. Using default value: 0.2
11
+ [Randomization] initial_noise_scale: No randomization applied. Using default value: 1.25
12
+ [Randomization] final_noise_scale: No randomization applied. Using default value: 0.8
13
+ [Randomization] smooth_blend_factor: No randomization applied. Using default value: 9.426004103284665
14
+ [Randomization] step_size_factor: No randomization applied. Using default value: 0.80814932869181
15
+ [Randomization] noise_scale_factor: No randomization applied. Using default value: 0.8113992828873163
16
+ [Random Range] rho: Picked random value 8.143645894730136 between 5.0 and 10.0
17
+ [Randomization] rho: Applied min/max randomization. Final value: 8.143645894730136
18
+ [Auto Sigma Min] sigma_min set to 0.1373089579162134 using scale factor 200
19
+ Using device: cuda
20
+ Final sigmas: sigma_min=0.1373089700937271, sigma_max=27.4617919921875
21
+ Generated sigma sequences. Karras: tensor([27.4618, 19.7189, 13.9617, 9.7354, 6.6759, 4.4948, 2.9658, 1.9136, 1.2043, 0.7370, 0.4371, 0.2501, 0.1373, 0.0000], device='cuda:0'), Exponential: tensor([27.4618, 17.6594, 11.3560, 7.3025, 4.6959, 3.0197, 1.9418, 1.2487, 0.8030, 0.5164, 0.3321, 0.2135, 0.1373, 0.0000], device='cuda:0')
22
+ [Progress Initialized] Created progress tensor with 13 steps (excluding terminal step) on device: cuda
23
+ [Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([13]) on device: cuda
24
+ [Sharpen Mask] Sharpening applied at steps: [10, 11, 12, 13]
sd_simple_kes_v1/kes_config/default_config.yaml ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ device: "cuda"
2
+ debug: true
3
+ global_randomize: false
4
+ #
5
+ #randomization_type: asymmetric # Options: asymmetric, symmetric, exponential, or log. "off" is no longer a valid choice; use each variable's boolean flag to turn randomization on/off.
6
+ #randomization_percent: 0.2 #Set to a decimal value
7
+ #
8
+ sigma_scale_factor: 200
9
+ sigma_auto_enabled: true
10
+ sigma_auto_mode: sigma_min # Options: sigma_min, sigma_max
11
+ #
12
+ rho_rand: true
13
+ rho_rand_min: 5.00
14
+ rho_rand_max: 10.00
15
+ rho: 7.571656624637901
16
+ rho_enable_randomization_type: false
17
+ rho_randomization_type: asymmetric
18
+ rho_randomization_percent: 0.2
19
+ #
20
+ sigma_min_rand: false
21
+ sigma_min_rand_min: 0.001
22
+ sigma_min_rand_max: 0.02
23
+ sigma_min: 0.13757067353874633
24
+ sigma_min_enable_randomization_type: false
25
+ sigma_min_randomization_type: asymmetric
26
+ sigma_min_randomization_percent: 0.2
27
+ #
28
+ sigma_max_rand: true
29
+ sigma_max_rand_min: 10
30
+ sigma_max_rand_max: 50
31
+ sigma_max: 28.215968374619045
32
+ sigma_max_enable_randomization_type: false
33
+ sigma_max_randomization_type: asymmetric
34
+ sigma_max_randomization_percent: 0.25
35
+ #
36
+ start_blend_rand: false
37
+ start_blend_rand_min: 0.05
38
+ start_blend_rand_max: 0.2
39
+ start_blend: 0.10237033381422371
40
+ start_blend_enable_randomization_type: false
41
+ start_blend_randomization_type: asymmetric
42
+ start_blend_randomization_percent: 0.2
43
+ #
44
+ end_blend_rand: false
45
+ end_blend_rand_min: 0.4
46
+ end_blend_rand_max: 0.8
47
+ end_blend: 0.5627048166151302
48
+ end_blend_enable_randomization_type: false
49
+ end_blend_randomization_type: asymmetric
50
+ end_blend_randomization_percent: 0.2
51
+ #
52
+ sharpness_rand: false
53
+ sharpness_rand_min: 0.85
54
+ sharpness_rand_max: 1.0
55
+ sharpness: 0.9070742264431869
56
+ sharpness_enable_randomization_type: false
57
+ sharpness_randomization_type: asymmetric
58
+ sharpness_randomization_percent: 0.2
59
+ #
60
+ early_stopping_threshold_rand: false
61
+ early_stopping_threshold_rand_min: 0.001
62
+ early_stopping_threshold_rand_max: 0.02
63
+ early_stopping_threshold: 0.01
64
+ early_stopping_threshold_enable_randomization_type: false
65
+ early_stopping_threshold_randomization_type: asymmetric
66
+ early_stopping_threshold_randomization_percent: 0.2
67
+
68
+ #
69
+ initial_step_size_rand: false
70
+ initial_step_size_rand_min: 0.7
71
+ initial_step_size_rand_max: 1.0
72
+ initial_step_size: 0.9
73
+ initial_step_size_enable_randomization_type: false
74
+ initial_step_size_randomization_type: asymmetric
75
+ initial_step_size_randomization_percent: 0.2
76
+ #
77
+ final_step_size_rand: false
78
+ final_step_size_rand_min: 0.1
79
+ final_step_size_rand_max: 0.3
80
+ final_step_size: 0.20
81
+ final_step_size_enable_randomization_type: false
82
+ final_step_size_randomization_type: asymmetric
83
+ final_step_size_randomization_percent: 0.2
84
+ #
85
+ initial_noise_scale_rand: false
86
+ initial_noise_scale_rand_min: 1.0
87
+ initial_noise_scale_rand_max: 1.5
88
+ initial_noise_scale: 1.25
89
+ initial_noise_scale_enable_randomization_type: false
90
+ initial_noise_scale_randomization_type: asymmetric
91
+ initial_noise_scale_randomization_percent: 0.2
92
+ #
93
+ final_noise_scale_rand: false
94
+ final_noise_scale_rand_min: 0.6
95
+ final_noise_scale_rand_max: 1.0
96
+ final_noise_scale: 0.80
97
+ final_noise_scale_enable_randomization_type: false
98
+ final_noise_scale_randomization_type: asymmetric
99
+ final_noise_scale_randomization_percent: 0.2
100
+ #
101
+ smooth_blend_factor_rand: false
102
+ smooth_blend_factor_rand_min: 6
103
+ smooth_blend_factor_rand_max: 11
104
+ smooth_blend_factor: 9.426004103284665
105
+ smooth_blend_factor_enable_randomization_type: false
106
+ smooth_blend_factor_randomization_type: asymmetric
107
+ smooth_blend_factor_randomization_percent: 0.2
108
+ #
109
+ step_size_factor_rand: false
110
+ step_size_factor_rand_min: 0.65
111
+ step_size_factor_rand_max: 0.85
112
+ step_size_factor: 0.80814932869181
113
+ step_size_factor_enable_randomization_type: false
114
+ step_size_factor_randomization_type: asymmetric
115
+ step_size_factor_randomization_percent: 0.2
116
+ #
117
+ noise_scale_factor_rand: false
118
+ noise_scale_factor_rand_min: 0.75
119
+ noise_scale_factor_rand_max: 0.95
120
+ noise_scale_factor: 0.8113992828873163
121
+ noise_scale_factor_enable_randomization_type: false
122
+ noise_scale_factor_randomization_type: asymmetric
123
+ noise_scale_factor_randomization_percent: 0.2
sd_simple_kes_v1/prompt_parser.py ADDED
@@ -0,0 +1,1599 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import re
3
+ from collections import namedtuple
4
+ import lark
5
+ import random
6
+ from functools import lru_cache
7
+ import hashlib
8
+ from itertools import product
9
+
10
+ import os
11
+ import logging
12
+ logger = logging.getLogger(__name__) # не настраиваем basicConfig в библиотеке
13
+
14
+ # Feature flags (can be overridden via environment variables):
15
+ def _env_bool(name: str, default: str = "0") -> bool:
16
+ v = str(os.getenv(name, default)).strip().lower()
17
+ return v not in ("0", "", "false", "no", "off")
18
# Placeholder used where an empty prompt fragment must remain non-empty.
SAFE_EMPTY = " "
# Allow empty options inside [a|b|] alternates (off by default).
ALLOW_EMPTY_ALTERNATE = _env_bool("ALLOW_EMPTY_ALTERNATE", "0")
# Expand alternates per sampling step instead of once.
EXPAND_ALTERNATE_PER_STEP = _env_bool("EXPAND_ALTERNATE_PER_STEP", "1")
# Safety cap on the number of {a,b}x{c,d} group combinations.
GROUP_COMBO_LIMIT = int(os.getenv("GROUP_COMBO_LIMIT", "100"))
# Suppress standalone ':' outside brackets in parse_prompt_attention.
SUPPRESS_STANDALONE_COLON = _env_bool("SUPPRESS_STANDALONE_COLON", "1")
# LRU cache size for tree hashing (overridable via env).
CACHE_SIZE = int(os.getenv('PROMPT_PARSER_CACHE_SIZE', 4096))

# Lark grammar (the `alternate` rule is substituted dynamically below).
# Dynamic rule for `alternate`:
_alt_rule = r' "[" prompt ("|" prompt)* "]" '  # no empty options
if ALLOW_EMPTY_ALTERNATE:
    _alt_rule = r' "[" prompt ("|" [prompt])+ "]" '  # allow empty options

# NOT an f-string! Plain r"""...""" plus string concatenation.
_grammar = r"""
!start: (prompt | /[][():|,]/+)*

prompt: (scheduled | emphasized | grouped
    | alternate | alternate_distinct
    | alternate2 | alternate1
    | top_level_sequence3 | top_level_sequence | sequence
    | compound | numbered | and_rule
    | plain | WHITESPACE)*

!emphasized: "(" prompt ")"
    | "(" prompt ":" prompt ")"
    | "(" prompt ":" NUMBER ")"

scheduled: "[" [prompt (":" prompt)+] "]" ":" NUMBER (step_range_list | reverse_flag | step_range_list reverse_flag)?
reverse_flag: "reverse" | "r"
step_range_list: step_range ("," step_range)*
step_range: NUMBER "-" NUMBER | NUMBER "%" "-" NUMBER "%"

alternate: """ + _alt_rule + r"""
!alternate_distinct: "[" prompt ("|" prompt)* "]!"
alternate1: (prompt) "|" (prompt)+
alternate2: (plain | compound) ("|" (plain | compound))+

grouped: "{" ((NUMBER_Q | prompt | sequence | grouped) ("," | "|")?)+ "}"

top_level_sequence: prompt ("::" sequence)+ "!!" ("," plain)?
top_level_sequence3: prompt ":::" sequence (WHITESPACE* "," WHITESPACE* sequence)* "!!!" (WHITESPACE* "," WHITESPACE* (plain | sequence))*
sequence: prompt "::" prompt ("," | WHITESPACE)* nested_sequence* ("!" | ";")?
nested_sequence: "::" prompt ("," | WHITESPACE)* ("!" | ";" | "~")?

compound: /[a-zA-Z0-9]+(_[a-zA-Z0-9]+)+/
numbered: NUMBER_Q ("!" | "_")? (grouped | sequence | compound | and_rule | plain | alternate | alternate_distinct | alternate2 | alternate1)
and_rule: (plain | compound) ("&" (plain | compound))+
WHITESPACE: /\s+/
plain: /([^\\[\]\{\}\(\),&:!|]|\\.)+/

%import common.SIGNED_NUMBER -> NUMBER
%import common.INT -> NUMBER_Q
"""

# Module-level parser instance shared by all callers.
schedule_parser = lark.Lark(_grammar, start="start")
78
+
79
+
80
@lru_cache(maxsize=CACHE_SIZE)
def hash_tree(tree: lark.Tree | lark.Token) -> str:
    """Return a stable MD5 hex digest of a parse (sub)tree.

    Trees hash recursively over the rule name concatenated with the child
    digests; tokens (and anything else) hash their string form.
    NOTE(review): lru_cache keys on the tree objects themselves, so cached
    entries keep their trees alive for the cache's lifetime — confirm this
    memory behaviour is intended.
    """
    if isinstance(tree, lark.Tree):
        return hashlib.md5((tree.data + ''.join(hash_tree(c) for c in tree.children)).encode()).hexdigest()
    return hashlib.md5(str(tree).encode()).hexdigest()
85
+
86
def resolve_tree(tree: lark.Tree | lark.Token, keep_spacing: bool = True) -> str:
    """Flatten a parse tree or token back into plain text.

    WHITESPACE tokens collapse to a single space when ``keep_spacing`` is
    True and are dropped otherwise. With ``keep_spacing`` every newline/tab
    variant — real line breaks as well as the *literal* two-character escape
    sequences ``\\n``/``\\t`` — is normalised to a space, and whitespace runs
    (including U+2028/U+2029 line separators) collapse to one space.

    Fix vs. original: the ``if keep_spacing else`` ternary inside the
    ``if keep_spacing:`` branch was dead code, and the literal-escape
    replacement pass ran twice; both redundancies removed, behaviour
    unchanged.
    """
    if not isinstance(tree, lark.Tree):
        return str(tree).strip()

    pieces = []
    for child in tree.children:
        if isinstance(child, lark.Token) and child.type == "WHITESPACE":
            if keep_spacing:
                pieces.append(" ")
            continue
        pieces.append(resolve_tree(child, keep_spacing))
    result = "".join(str(p) for p in pieces if p)

    if not keep_spacing:
        return result.strip()

    # Normalise literal escapes first, then real line endings, then collapse
    # remaining whitespace runs into single spaces.
    result = result.replace('\\n', ' ').replace('\\t', ' ')
    result = result.replace('\r\n', '\n').replace('\r', '\n')
    result = result.replace('\n', ' ').replace('\t', ' ')
    return re.sub(r"[\s\u2028\u2029]+", " ", result).strip()
105
+
106
class ScheduleTransformer(lark.Transformer):
    """Flatten a parsed prompt tree into text for a single sampling step.

    ``total_steps``/``current_step`` drive step-dependent constructs
    ([a|b] alternates and [a:b:N] schedules); ``seed`` makes the random
    choices reproducible across runs.
    """

    def __init__(self, total_steps: int, current_step: int = 1, seed: int | None = 42):
        super().__init__()
        self.total_steps = total_steps
        self.current_step = current_step
        self.seed = seed
        # Seeded RNG for reproducibility; module RNG when seed is None.
        self.rng = random.Random(seed) if seed is not None else random


    def start(self, args):
        """Join transformed children, special-casing flat '::' sequences."""
        s = "".join(str(arg) for arg in args if arg)
        # owner::a::b!!, extra -> owner -> owner: a, b, extra
        if "::" in s and "!!" in s and all(ch not in s for ch in "[]()"):
            left, trailing = s.split("!!", 1)
            owner, rest = left.split("::", 1)
            descriptors = [x.strip(" ,~!;") for x in rest.split("::") if x.strip(" ,~!;")]
            seq_text = f"{owner.strip()}: {', '.join(descriptors)}"
            trailing_text = [t.strip(" ,") for t in trailing.split(",") if t.strip(" ,")]
            out = f"{owner.strip()} -> {seq_text}"
            if trailing_text:
                out += f", {', '.join(trailing_text)}"
            return out
        # owner::a::b! -> owner: a, b
        if "::" in s and (s.endswith("!") or s.endswith(";")) and all(ch not in s for ch in "[]()"):
            owner, rest = s.split("::", 1)
            rest = rest[:-1]
            descriptors = [x.strip(" ,~!;") for x in rest.split("::") if x.strip(" ,~!;")]
            return f"{owner.strip()}: {', '.join(descriptors)}"
        return s

    def prompt(self, args):
        """Concatenate truthy child fragments."""
        return "".join(str(arg) for arg in args if arg)

    def plain(self, args):
        # Plain rule has exactly one token child.
        return args[0].value

    def compound(self, args):
        # Rejoin word_word compounds with underscores.
        return "_".join(str(arg) for arg in args)

    def and_rule(self, args):
        # a&b&c -> "a and b and c".
        return " and ".join(resolve_tree(arg, keep_spacing=True) for arg in args if resolve_tree(arg))

    def grouped(self, args):
        # {a, b|c} -> "a, b, c" (separators stripped).
        return ", ".join(resolve_tree(arg, keep_spacing=True) for arg in args if resolve_tree(arg).strip(" ,|"))

    def alternate(self, args):
        """[a|b|c]: pick the option for the current step (round-robin)."""
        vals = []
        for arg in args:
            s = resolve_tree(arg, keep_spacing=True)
            # NOTE: `s or s == ""` is always true for strings, so every
            # resolved option (including empty ones) is kept.
            if s or s == "":
                vals.append(s)
        return vals[(self.current_step - 1) % len(vals)] if vals else "empty_prompt"


    def alternate_distinct(self, args):
        """[a|b]!: pick one option at random (seeded)."""
        options = [resolve_tree(arg, keep_spacing=True) for arg in args if resolve_tree(arg)]
        return self.rng.choice(options) if options else "empty_prompt"

    def alternate1(self, args):
        """Unbracketed a|b alternates: random pick."""
        options = [resolve_tree(arg, keep_spacing=True) for arg in args if resolve_tree(arg)]
        return self.rng.choice(options) if options else "empty_prompt"

    def alternate2(self, args):
        """plain|compound alternates: re-join with '|', propagating the
        first option's '_suffix' onto suffix-less options."""
        options = [resolve_tree(a, keep_spacing=True) for a in args if resolve_tree(a, keep_spacing=True)]
        suffix = options[0].split("_", 1)[1] if options and "_" in options[0] else ""
        combined = [(o if "_" in o or not suffix else f"{o}_{suffix}") for o in options]
        return "|".join(combined) if combined else "empty_prompt"



    def numbered(self, args):
        """N!target / N_target: select N options from the target node."""
        # Requested count.
        quantity = int(args[0])

        # Distinct flag: accept both "!" and "_" (may arrive as a Token).
        distinct = False  # treat all numbered selections as distinct in visitor to avoid duplicate options due to parsing limits
        if len(args) > 1:
            mark = str(args[1])
            distinct = mark in ("!", "_")

        # Target node holding the options.
        target = args[-1]

        # Collect options exactly once (do not call self.visit twice!).
        # NOTE(review): lark.Transformer does not define a `visit` method
        # (that belongs to Visitor) — confirm this path is actually reached,
        # otherwise it would raise AttributeError.
        options = []
        if isinstance(target, lark.Tree) and getattr(target, "data", None) in ("alternate", "alternate1", "alternate2"):
            for child in target.children:
                val = self.visit(child)
                if val:
                    options.append(val)
        elif isinstance(target, lark.Token):
            options = [resolve_tree(target, keep_spacing=True)]
        else:
            for child in getattr(target, "children", []):
                val = self.visit(child)
                if val:
                    options.append(val)

        if not options:
            return "empty_prompt"

        # Selection.
        if distinct:
            # deterministic: take first N unique
            seen = []
            for opt in options:
                if opt not in seen:
                    seen.append(opt)
                if len(seen) >= quantity:
                    break
            selected = seen if len(seen) >= quantity else seen + options[:max(0, quantity - len(seen))]
        else:
            selected = self.rng.choices(options, k=quantity)

        return ", ".join(selected)

    def sequence(self, args, parent=None):
        """owner::a, b -> "owner: a, b"; `parent` overrides the owner."""
        owner = resolve_tree(args[0], keep_spacing=True) if parent is None else parent
        descriptors = [resolve_tree(arg, keep_spacing=True).strip(" ,~!;") for arg in args[1:] if resolve_tree(arg).strip(" ,~!;")]
        return f"{owner}: {', '.join(descriptors)}"

    def top_level_sequence(self, args):
        """owner ::seq ::seq !! [, trailing] -> "owner -> owner: ..., trailing"."""
        owner = resolve_tree(args[0], keep_spacing=True).strip()
        sequences = []
        trailing_text = []
        for child in args[1:]:
            if isinstance(child, lark.Tree) and child.data == "sequence":
                sequences.append(self.sequence(child.children, owner))
            elif isinstance(child, str) and child.strip() == "!!":
                continue
            else:
                t = resolve_tree(child, keep_spacing=True).strip(" ,")
                if t:
                    trailing_text.append(t)
        text = f"{owner} -> {', '.join(sequences)}"
        if trailing_text:
            text += f", {', '.join(trailing_text)}"
        return text

    def top_level_sequence3(self, args):
        """Triple-colon variant: owner ::: seq, seq !!! [, trailing]."""
        owner = resolve_tree(args[0], keep_spacing=True).strip()
        sequences, trailing = [], []
        for child in args[1:]:
            if isinstance(child, lark.Tree) and child.data == "sequence":
                sequences.append(self.sequence(child.children, owner))
            else:
                t = resolve_tree(child, keep_spacing=True).strip(" ,")
                if t and t != "!!!":
                    trailing.append(t)
        text = f"{owner} -> {', '.join(sequences)}"
        if trailing:
            text += f", {', '.join(trailing)}"
        return text



    def nested_sequence(self, args):
        """::a, b[~]: '~' picks one element at random, else render [a | b]."""
        # Extract elements, excluding the trailing terminator symbol.
        elements = [resolve_tree(arg, keep_spacing=True).strip(" ,~!;") for arg in args[:-1] if resolve_tree(arg).strip(" ,~!;")]
        # Check the terminator symbol.
        terminator = args[-1] if args and isinstance(args[-1], str) else None
        if terminator == "~":
            return self.rng.choice(elements) if elements else "empty_prompt"
        return f"[{' | '.join(elements)}]"

    def emphasized(self, args):
        """(prompt[:weight]) -> "(prompt:weight)"; default weight 1.1."""
        prompt = resolve_tree(args[0], keep_spacing=True)
        try:
            weight = float(args[1]) if len(args) > 1 and isinstance(args[1], lark.Token) and args[1].type == "NUMBER" else 1.1
        except ValueError:
            weight = 1.0
        return f"({prompt}:{weight})"

    def scheduled(self, args):
        """[a:b]:N — return the prompt active at current_step.

        Weight <= 1.0 is a fraction of total_steps; larger values are an
        absolute step boundary (clamped to [1, total_steps]).
        """
        prompts = [arg for arg in args[:-1] if not isinstance(arg, lark.Token) or arg.type != "NUMBER"]
        number_node = args[-1]
        if isinstance(number_node, lark.Tree):
            number_node = resolve_tree(number_node, keep_spacing=True)
        try:
            weight = float(number_node)
        except ValueError:
            weight = 1.0

        boundary = int(weight * self.total_steps) if weight <= 1.0 else int(weight)
        boundary = max(1, min(boundary, self.total_steps))

        if not prompts:
            return "empty_prompt"
        if len(prompts) == 1:
            # Single prompt: active only from the boundary onward.
            return f"({resolve_tree(prompts[0], keep_spacing=True)}:{weight})" if self.current_step >= boundary else ""
        # Multiple prompts: divide [1, boundary] evenly; the last prompt
        # owns everything up to total_steps.
        step_increment = boundary / max(1, len(prompts))
        for i, prompt in enumerate(prompts):
            step = min(self.total_steps, int(i * step_increment)) if i < len(prompts) - 1 else self.total_steps
            if self.current_step <= step:
                return f"({resolve_tree(prompt, keep_spacing=True)}:{weight})"
        return f"({resolve_tree(prompts[-1], keep_spacing=True)}:{weight})"
302
+
303
+ class CollectSteps(lark.Visitor):
304
+
305
    def visit_prompt(self, tree):
        """Handle a prompt node, fast-pathing bracket schedules.

        Two textual fast paths before falling back to recursive traversal:
        (1) '... [a:b:c:3] ...' with no pipes/parens — evenly spread the
        bracketed prompts up to boundary 3 (or a <=1.0 fraction of steps);
        (2) '[inner]:N' — activate `inner` only after boundary N.
        Returns a list of [end_step, text] schedule entries.
        """
        # Detect simple bracket scheduling like '... [a:b:c:3] ...' without pipes and turn into schedule
        full = resolve_tree(tree, keep_spacing=True)
        if '[' in full and ']' in full and '|' not in full and '(' not in full and ')' not in full:
            pre, inner, post = full, "", ""
            try:
                lb = full.index('['); rb = full.rindex(']')
                pre, inner, post = full[:lb], full[lb+1:rb], full[rb+1:]
                parts = [p.strip() for p in inner.split(':') if p.strip()]
                # Last ':'-separated part must be numeric to count as a schedule.
                if len(parts) >= 2 and re.match(r'^[-+]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?$', parts[-1]):
                    boundary_f = float(parts[-1])
                    prompts = parts[:-1]
                    if prompts:
                        def _clamp(x): return max(1, min(int(round(x)), self.steps))
                        schedules = []
                        pre_s = self.prefix + pre
                        # NOTE(review): suf_s (which carries `post`) is built but the
                        # entries below append self.suffix only, dropping any text
                        # after the closing bracket — confirm this is intended.
                        suf_s = post + self.suffix
                        if len(prompts) == 1:
                            # <=1.0 means a fraction of total steps, else absolute step.
                            boundary = _clamp(boundary_f if boundary_f > 1.0 else boundary_f * self.steps)
                            schedules.append([boundary, pre_s + self.suffix])
                            schedules.append([self.steps, pre_s + prompts[0] + self.suffix])
                            return [[e, _apply_and(t)] for e,t in schedules]
                        else:
                            boundary = _clamp(boundary_f if boundary_f > 1.0 else boundary_f * self.steps)
                            if boundary < len(prompts):
                                boundary = len(prompts)
                            step_size = boundary / len(prompts)
                            # Leading empty segment when each slice spans >1 step.
                            if int(round(step_size)) > 1:
                                schedules.append([1, pre_s + self.suffix])
                            for i, ptxt in enumerate(prompts):
                                start = _clamp(int(round(i * step_size)) + 1)
                                end = _clamp(int(round((i + 1) * step_size)))
                                if start < end:
                                    schedules.append([end, pre_s + ptxt + self.suffix])
                            # Last prompt holds until the final step.
                            if schedules and schedules[-1][0] < self.steps:
                                schedules.append([self.steps, pre_s + prompts[-1] + self.suffix])
                            return [[e, _apply_and(t)] for e,t in schedules] or [[self.steps, pre_s + prompts[-1] + self.suffix]]
            except ValueError:
                pass
        # 2b) '[inner]:N' form (no pipes inside) -> schedule after boundary N
        if '[' in full and ']' in full and '|' not in full:
            import re as _re
            m = _re.match(r'^(.*)\[(.*?)\]\s*:\s*([-+]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?)\s*(.*)$', full)
            if m:
                pre, inner, boundary_txt, post = m.groups()
                if '(' not in inner and ')' not in inner:
                    try:
                        boundary_f = float(boundary_txt)
                        prompts = [p.strip() for p in inner.split(':') if p.strip()] or []
                        if not prompts and inner.strip():
                            prompts = [inner.strip()]
                        if prompts:
                            def clamp(x): return max(1, min(int(round(x)), self.steps))
                            pre_s = self.prefix + pre
                            suf_s = post + self.suffix
                            boundary = clamp(boundary_f if boundary_f > 1.0 else boundary_f * self.steps)
                            if len(prompts) == 1:
                                # Empty until boundary, then the single prompt.
                                return [[boundary, pre_s + suf_s], [self.steps, pre_s + prompts[0] + suf_s]]
                            if boundary < len(prompts):
                                boundary = len(prompts)
                            step_size = boundary / len(prompts)
                            schedules = []
                            if int(round(step_size)) > 1:
                                schedules.append([1, pre_s + suf_s])
                            for i, ptxt in enumerate(prompts):
                                start = clamp(int(round(i * step_size)) + 1)
                                end = clamp(int(round((i + 1) * step_size)))
                                if start < end:
                                    schedules.append([end, pre_s + ptxt + suf_s])
                            if schedules and schedules[-1][0] < self.steps:
                                schedules.append([self.steps, pre_s + prompts[-1] + suf_s])
                            return [[e, _apply_and(t)] for e,t in schedules] or [[self.steps, pre_s + prompts[-1] + suf_s]]
                    except ValueError:
                        pass

        return self._default_visit(tree)
381
+ def __init__(self, steps, prefix="", suffix="", depth=0, use_scheduling=True, seed=None):
382
+ super().__init__()
383
+ self.steps = steps
384
+ self.prefix = prefix
385
+ self.suffix = suffix
386
+ self.depth = depth
387
+ self.use_scheduling = use_scheduling
388
+ self.seed = seed
389
+ self.rng = random.Random(seed) if seed is not None else random
390
+ self.schedules = []
391
+
392
+ def visit(self, tree):
393
+ if isinstance(tree, lark.Tree):
394
+ method_name = f"visit_{tree.data}"
395
+ method = getattr(self, method_name, self._default_visit)
396
+ return method(tree)
397
+ elif isinstance(tree, lark.Token):
398
+ return self._visit_token(tree)
399
+ return []
400
+
401
    def visit_start(self, tree):
        """Entry point: try whole-string fast paths, else traverse children.

        Returns a list of [end_step, text] schedule entries.
        """
        full = resolve_tree(tree, keep_spacing=True).strip()

        # 0) owner::a::b!!, trailing -> owner -> owner: a, owner: b, trailing
        if "::" in full and "!!" in full and all(ch not in full for ch in '[]()'):
            left, trailing = full.split("!!", 1)
            owner, rest = left.split("::", 1)
            descriptors = [x.strip(' ,~!;') for x in rest.split('::') if x.strip(' ,~!;')]
            sequences = [f"{owner.strip()}: {d}" for d in descriptors]
            trailing_text = [t.strip(' ,') for t in trailing.split(',') if t.strip(' ,')]
            out = f"{owner.strip()} -> {', '.join(sequences)}"
            if trailing_text:
                out += f", {', '.join(trailing_text)}"
            return [[self.steps, self.prefix + out + self.suffix]]

        # 1) owner::a::b! -> owner: a, b
        if '::' in full and (full.endswith('!') or full.endswith(';')) and all(ch not in full for ch in '[]()'):
            owner, rest = full.split('::', 1)
            rest = rest[:-1]
            descriptors = [x.strip(' ,~!;') for x in rest.split('::') if x.strip(' ,~!;')]
            text = f"{owner.strip()}: {', '.join(descriptors)}"
            return [[self.steps, self.prefix + text + self.suffix]]


        # 2) '... [a:b:c:3] ...' (no '|') — uniform schedule up to boundary 3
        #    (or a <=1.0 fraction of total steps)
        if '[' in full and ']' in full and '|' not in full and '(' not in full and ')' not in full:
            try:
                lb, rb = full.index('['), full.rindex(']')
                pre, inner, post = full[:lb], full[lb+1:rb], full[rb+1:]
                parts = [p.strip() for p in inner.split(':') if p.strip()]

                # Last part must be numeric to count as a schedule boundary.
                if len(parts) >= 2 and re.match(r'^[-+]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?$', parts[-1]):
                    boundary_f = float(parts[-1])
                    prompts = parts[:-1]

                    if prompts:
                        def clamp(x):
                            return max(1, min(int(round(x)), self.steps))

                        schedules = []
                        pre_s = self.prefix + pre
                        # NOTE(review): suf_s carries `post` but the entries below
                        # append self.suffix only — text after ']' is dropped here;
                        # confirm intended.
                        suf_s = post + self.suffix

                        if len(prompts) == 1:
                            boundary = clamp(boundary_f if boundary_f > 1.0 else boundary_f * self.steps)
                            schedules.append([boundary, pre_s + self.suffix])
                            schedules.append([self.steps, pre_s + prompts[0] + self.suffix])
                            return [[e, _apply_and(t)] for e,t in schedules]

                        boundary = clamp(boundary_f if boundary_f > 1.0 else boundary_f * self.steps)
                        if boundary < len(prompts):
                            boundary = len(prompts)

                        step_size = boundary / len(prompts)
                        # Leading empty segment when each slice spans >1 step.
                        if int(round(step_size)) > 1:
                            schedules.append([1, pre_s + self.suffix])

                        for i, ptxt in enumerate(prompts):
                            start = clamp(int(round(i * step_size)) + 1)
                            end = clamp(int(round((i + 1) * step_size)))
                            if start < end:
                                schedules.append([end, pre_s + ptxt + self.suffix])

                        # Last prompt holds until the final step.
                        if schedules and schedules[-1][0] < self.steps:
                            schedules.append([self.steps, pre_s + prompts[-1] + self.suffix])

                        return [[e, _apply_and(t)] for e,t in schedules] or [[self.steps, pre_s + prompts[-1] + self.suffix]]

            except ValueError:
                pass

        # Default: standard traversal.
        return self._default_visit(tree)
474
+
475
+
476
    def _default_visit(self, tree):
        """Recursively collect schedules from children.

        For each non-whitespace child, siblings before/after it are flattened
        into prefix/suffix text and a fresh CollectSteps walks the child, so
        each entry carries its full surrounding context.
        """
        schedules = []
        has_tree_child = any(isinstance(c, lark.Tree) for c in tree.children)
        for i, child in enumerate(tree.children):
            if isinstance(child, lark.Token) and child.type == "WHITESPACE":
                continue
            # If structural nodes exist, skip raw tokens — otherwise their
            # text would be emitted twice (as entry and as prefix/suffix).
            if has_tree_child and isinstance(child, lark.Token):
                continue
            pre = "".join(
                resolve_tree(c, keep_spacing=True)
                for j, c in enumerate(tree.children)
                if j < i and not (isinstance(c, lark.Token) and c.type == "WHITESPACE")
            )
            post = "".join(
                resolve_tree(c, keep_spacing=True)
                for j, c in enumerate(tree.children)
                if j > i and not (isinstance(c, lark.Token) and c.type == "WHITESPACE")
            )
            collector = CollectSteps(
                self.steps,
                prefix=self.prefix + pre,
                suffix=post + self.suffix,
                depth=self.depth + 1,
                use_scheduling=self.use_scheduling,
                seed=self.seed,
            )
            child_schedules = collector.visit(child)
            schedules.extend(child_schedules)
        return [[e, _apply_and(t)] for e,t in schedules]
506
+
507
+
508
+ def _visit_token(self, token):
509
+ if token.type == "WHITESPACE":
510
+ return []
511
+ return [[self.steps, self.prefix + str(token) + self.suffix]]
512
+
513
+ def visit_plain(self, tree):
514
+ text = resolve_tree(tree, keep_spacing=True)
515
+ return [[self.steps, self.prefix + text + self.suffix]]
516
+
517
+ def visit_top_level_sequence3(self, tree):
518
+ transformer = ScheduleTransformer(self.steps, 1, self.seed)
519
+ text = transformer.transform(tree)
520
+ return [[self.steps, self.prefix + text + self.suffix]]
521
+
522
+ def visit_scheduled(self, tree):
523
+ if not tree.children:
524
+ return [[self.steps, self.prefix + "empty_prompt" + self.suffix]]
525
+
526
+ # 1) Извлечение как у тебя
527
+ prompts = [
528
+ p for p in tree.children
529
+ if not (isinstance(p, lark.Token) and p.type == "NUMBER")
530
+ and not (isinstance(p, lark.Tree) and getattr(p, "data", None) in ("step_range_list", "reverse_flag"))
531
+ ]
532
+ number_node = next((p for p in tree.children if isinstance(p, lark.Token) and p.type == "NUMBER"), None)
533
+ step_range_list = next((p for p in tree.children if isinstance(p, lark.Tree) and getattr(p, "data", None) == "step_range_list"), None)
534
+ is_reverse = any(isinstance(p, lark.Tree) and getattr(p, "data", None) == "reverse_flag" for p in tree.children)
535
+
536
+ try:
537
+ weight = float(number_node.value) if number_node is not None else 1.0
538
+ except (ValueError, TypeError, AttributeError):
539
+ weight = 1.0
540
+
541
+ if not prompts:
542
+ return [[self.steps, self.prefix + "empty_prompt" + self.suffix]]
543
+
544
+ # Вспомогалка
545
+ def _clamp_step(x: int) -> int:
546
+ return max(1, min(x, self.steps))
547
+
548
+ # 4) Интервалы
549
+ step_intervals = []
550
+ explicit_ranges = False
551
+ if step_range_list:
552
+ explicit_ranges = True
553
+ for sr in step_range_list.children:
554
+ if not (isinstance(sr, lark.Tree) and getattr(sr, "data", None) == "step_range"):
555
+ continue
556
+ if len(sr.children) != 2:
557
+ continue
558
+ start_txt = resolve_tree(sr.children[0], keep_spacing=False)
559
+ end_txt = resolve_tree(sr.children[1], keep_spacing=False)
560
+
561
+ def _to_steps(txt: str) -> int:
562
+ s = txt.strip()
563
+ if s.endswith("%"):
564
+ try:
565
+ return int(round(float(s[:-1]) / 100.0 * self.steps))
566
+ except ValueError:
567
+ return 1
568
+ try:
569
+ return int(round(float(s)))
570
+ except ValueError:
571
+ return 1
572
+
573
+ start_step = _clamp_step(_to_steps(start_txt))
574
+ end_step = _clamp_step(_to_steps(end_txt))
575
+ if start_step < end_step:
576
+ step_intervals.append((start_step, end_step))
577
+ else:
578
+ num_prompts = len(prompts)
579
+ boundary = int(round(weight * self.steps)) if weight <= 1.0 else int(round(weight))
580
+ boundary = _clamp_step(boundary)
581
+ if num_prompts == 1:
582
+ # Особый случай: один вариант внутри [] — включаем его ПОСЛЕ boundary
583
+ before_end = boundary
584
+ after_end = self.steps
585
+ schedules = []
586
+ # BEFORE: без содержимого [], но сохраняем пробел между префиксом и суффиксом если нужен
587
+ spacer = " " if (self.prefix and not self.prefix.endswith(" ")) else ""
588
+ schedules.append([before_end, self.prefix + spacer + self.suffix])
589
+ # AFTER: с единственным текстом
590
+ last_text = resolve_tree(prompts[0], keep_spacing=True)
591
+ spacer2 = " " if (self.prefix and last_text and not self.prefix.endswith(" ")) else ""
592
+ schedules.append([after_end, self.prefix + spacer2 + last_text + self.suffix])
593
+ return [[e, _apply_and(t)] for e,t in schedules]
594
+ else:
595
+ # Несколько вариантов: равномерное деление в пределах boundary
596
+ if boundary < num_prompts:
597
+ boundary = num_prompts
598
+ step_size = boundary / num_prompts
599
+ for i in range(num_prompts):
600
+ start = int(round(i * step_size)) + 1
601
+ end = int(round((i + 1) * step_size))
602
+ start = _clamp_step(start); end = _clamp_step(end)
603
+ if start < end:
604
+ step_intervals.append((start, end))
605
+
606
+ # 5) reverse
607
+ if is_reverse:
608
+ prompts = prompts[::-1]
609
+ step_intervals = step_intervals[::-1]
610
+
611
+ # 6) Формируем расписания
612
+ schedules = []
613
+
614
+ # ДО первого интервала (для явных диапазонов/несколько промптов)
615
+ if step_intervals and step_intervals[0][0] > 1:
616
+ schedules.append([step_intervals[0][0] - 1, self.prefix + self.suffix])
617
+
618
+ for i, (start, end) in enumerate(step_intervals[:len(prompts)]):
619
+ end = min(end, self.steps)
620
+ if start < end:
621
+ p = prompts[i]
622
+ if isinstance(p, lark.Tree):
623
+ child_schedules = self.visit(p)
624
+ else:
625
+ text = resolve_tree(p, keep_spacing=True)
626
+ child_schedules = [[self.steps, text]]
627
+
628
+ for sched in child_schedules:
629
+ schedules.append([end, self.prefix + sched[1] + self.suffix])
630
+
631
+ # ПОСЛЕ последнего интервала — берём последний prompt
632
+ if step_intervals and step_intervals[-1][1] < self.steps:
633
+ tail_text = resolve_tree(prompts[-1], keep_spacing=True)
634
+ schedules.append([self.steps, self.prefix + tail_text + self.suffix])
635
+
636
+ if not schedules:
637
+ return [[self.steps, self.prefix + resolve_tree(tree, keep_spacing=True) + self.suffix]]
638
+
639
+ return [[e, _apply_and(t)] for e,t in schedules]
640
+
641
    def visit_alternate(self, tree):
        """Handle '[a|b|...]' alternation nodes.

        Two behaviors:
          * If the bracket content has no '|' but contains ':' and ends in a
            number (e.g. "[a:b:c:3]"), treat it as a step-scheduled switch
            over that boundary and return [[end_step, text], ...].
          * Otherwise treat children as alternation options; either expand one
            option per sampling step (EXPAND_ALTERNATE_PER_STEP) or pin a
            single seeded-random choice for the whole run.
        """
        # Special-case: bracket content like "[a:b:c:3]" (no pipes) -> scheduled over boundary=3
        inner_tokens = []
        for child in tree.children:
            if isinstance(child, lark.Token) and child.type in ("WHITESPACE",):
                continue
            t = resolve_tree(child, keep_spacing=True)
            if t in ("[", "]"):
                continue
            inner_tokens.append(t)
        inner_text = "".join(inner_tokens).strip()
        if '|' not in inner_text and ':' in inner_text:
            parts = [p.strip() for p in inner_text.split(':') if p.strip() != ""]
            if len(parts) >= 2:
                last = parts[-1]
                try:
                    boundary_f = float(last)
                    prompts = parts[:-1]
                    if prompts:
                        # compute schedules similar to visit_emphasized scheduled handling
                        def _clamp(x): return max(1, min(int(round(x)), self.steps))
                        schedules = []
                        if len(prompts) == 1:
                            # boundary <= 1.0 is interpreted as a fraction of total steps
                            boundary = _clamp(boundary_f if boundary_f > 1.0 else boundary_f * self.steps)
                            schedules.append([boundary, self.prefix + self.suffix])
                            schedules.append([self.steps, self.prefix + prompts[0] + self.suffix])
                            return [[e, _apply_and(t)] for e,t in schedules]
                        else:
                            boundary = _clamp(boundary_f if boundary_f > 1.0 else boundary_f * self.steps)
                            if boundary < len(prompts):
                                boundary = len(prompts)
                            # evenly split [1..boundary] among the prompts
                            step_size = boundary / len(prompts)
                            # pre
                            if int(round(step_size)) > 1:
                                schedules.append([1, self.prefix + self.suffix])
                            for i, ptxt in enumerate(prompts):
                                start = _clamp(int(round(i * step_size)) + 1)
                                end = _clamp(int(round((i + 1) * step_size)))
                                if start < end:
                                    schedules.append([end, self.prefix + ptxt + self.suffix])
                            # extend the last prompt to the final step if needed
                            if schedules and schedules[-1][0] < self.steps:
                                schedules.append([self.steps, self.prefix + prompts[-1] + self.suffix])
                            return [[e, _apply_and(t)] for e,t in schedules] or [[self.steps, self.prefix + prompts[-1] + self.suffix]]
                except ValueError:
                    # last part was not numeric -> fall through to plain alternation
                    pass
        options = []
        for child in tree.children:
            if isinstance(child, lark.Token) and child.type == "WHITESPACE":
                continue
            child_schedules = self.visit(child)
            child_options = [
                sched[1].strip(" ,|")
                for sched in child_schedules
                if sched[1].strip(" ,|")
            ]
            options.append(
                child_options
                or [resolve_tree(child, keep_spacing=True).strip(" ,|")]
            )

        if not options:
            return [[self.steps, self.prefix + "empty_prompt" + self.suffix]]

        if EXPAND_ALTERNATE_PER_STEP:
            # one schedule entry per sampling step, cycling through the options
            schedules = []
            for step in range(1, self.steps + 1):
                option = options[(step - 1) % len(options)]
                for sched in option:
                    schedules.append([step, self.prefix + sched + self.suffix])
            return [[e, _apply_and(t)] for e,t in schedules]
        else:
            # pin one variant for the whole run:
            # pick a group via the seeded rng, then an element within the group
            group = options[self.rng.randrange(len(options))]
            choice = self.rng.choice(group) if group else "empty_prompt"
            return [[self.steps, self.prefix + choice + self.suffix]]
717
+
718
+
719
+ def visit_alternate_distinct(self, tree):
720
+ options = []
721
+ for child in tree.children:
722
+ if isinstance(child, lark.Token) and child.type == "WHITESPACE":
723
+ continue
724
+ child_schedules = self.visit(child)
725
+ child_options = [
726
+ sched[1].strip(" ,|")
727
+ for sched in child_schedules
728
+ if sched[1].strip(" ,|")
729
+ ]
730
+ options.append(
731
+ child_options
732
+ or [resolve_tree(child, keep_spacing=True).strip(" ,|")]
733
+ )
734
+
735
+ # сплющиваем все варианты
736
+ flat = [opt for group in options for opt in group]
737
+ if not flat:
738
+ return [[self.steps, self.prefix + "empty_prompt" + self.suffix]]
739
+
740
+ selected = self.rng.choice(flat)
741
+ return [[self.steps, self.prefix + selected + self.suffix]]
742
+
743
+ def visit_alternate1(self, tree):
744
+ return self.visit_alternate_distinct(tree)
745
+
746
+ def visit_alternate2(self, tree):
747
+ options = [
748
+ resolve_tree(c, keep_spacing=True).strip()
749
+ for c in tree.children
750
+ if resolve_tree(c, keep_spacing=True).strip()
751
+ ]
752
+ suffix = options[0].split("_", 1)[1] if options and "_" in options[0] else ""
753
+ combined = []
754
+ for opt in options:
755
+ combined.append(opt if "_" in opt or not suffix else f"{opt}_{suffix}")
756
+ text = "|".join(combined) if combined else "empty_prompt"
757
+ return [[self.steps, self.prefix + text + self.suffix]]
758
+
759
+
760
+ def visit_grouped(self, tree):
761
+ all_options = []
762
+ for child in tree.children:
763
+ if isinstance(child, lark.Token) and child.type == "WHITESPACE":
764
+ continue
765
+ child_schedules = self.visit(child)
766
+ child_options = [sched[1].strip(" ,|") for sched in child_schedules if sched[1].strip(" ,|")]
767
+ all_options.append(child_options or [resolve_tree(child, keep_spacing=True).strip(" ,|")])
768
+ out = []
769
+ for i, combo in enumerate(product(*all_options)):
770
+ if i >= GROUP_COMBO_LIMIT:
771
+ break
772
+ text = ", ".join(combo).strip()
773
+ if text:
774
+ out.append([self.steps, self.prefix + text + self.suffix])
775
+
776
+ return out or [[self.steps, self.prefix + "empty_prompt" + self.suffix]]
777
+
778
+ def visit_sequence(self, tree):
779
+ transformer = ScheduleTransformer(self.steps, 1, self.seed)
780
+ text = transformer.transform(tree)
781
+ return [[self.steps, self.prefix + text + self.suffix]]
782
+
783
+ def visit_nested_sequence(self, tree):
784
+ # Извлекаем элементы, исключая завершающий символ
785
+ elements = [
786
+ resolve_tree(c, keep_spacing=True).strip(" ,~!;")
787
+ for c in tree.children[:-1]
788
+ if resolve_tree(c, keep_spacing=True).strip(" ,~!;")
789
+ ]
790
+ # Проверяем завершающий символ
791
+ terminator = tree.children[-1] if tree.children and isinstance(tree.children[-1], lark.Token) else None
792
+ if terminator and terminator.value == "~":
793
+ text = self.rng.choice(elements) if elements else "empty_prompt"
794
+ else:
795
+ text = f"[{' | '.join(elements)}]"
796
+ return [[self.steps, self.prefix + text + self.suffix]]
797
+
798
    def visit_numbered(self, tree):
        """Handle 'N[...]' / 'N![...]' nodes: pick N options from the target.

        children[0] is the quantity, an optional '!'/'_' marker requests
        distinct picks, and the last child is the option source. Returns a
        single full-length schedule with the picks comma-joined.
        """
        quantity = int(tree.children[0])
        distinct = False
        if len(tree.children) > 1:
            mark = tree.children[1]
            try:
                if str(mark) in ('!', '_'):
                    distinct = True
            except Exception:
                pass
        target = tree.children[-1]

        import lark as _l
        options = []
        # Recursively flatten alternation/prompt subtrees into plain option texts.
        def add_opts(node):
            if isinstance(node, _l.Tree):
                if getattr(node, "data", None) in ("alternate","alternate1","alternate2","prompt"):
                    for ch in node.children:
                        add_opts(ch)
                else:
                    txt = resolve_tree(node, keep_spacing=True).strip(" ,|")
                    if txt:
                        options.append(txt)
            elif isinstance(node, _l.Token):
                txt = str(node).strip(" ,|")
                if txt:
                    options.append(txt)

        add_opts(target)
        if not options:
            # fall back to visiting the target and harvesting its schedule texts
            child_schedules = self.visit(target)
            options = [s[1].strip(" ,|") for s in child_schedules if s[1].strip(" ,|")]
        if not options:
            return [[self.steps, self.prefix + "empty_prompt" + self.suffix]]

        if distinct:
            # take unique options in order; top up with random repeats if short
            seen = []
            for opt in options:
                if opt not in seen:
                    seen.append(opt)
                if len(seen) == quantity:
                    break
            if len(seen) < quantity:
                seen += self.rng.choices(options, k=quantity - len(seen))
            selected = seen
        else:
            # sampling with replacement
            selected = self.rng.choices(options, k=quantity)

        return [[self.steps, self.prefix + ", ".join(selected) + self.suffix]]
847
+
848
+
849
+
850
+ def visit_and_rule(self, tree):
851
+ text = " and ".join(resolve_tree(c, keep_spacing=True) for c in tree.children if resolve_tree(c, keep_spacing=True))
852
+ return [[self.steps, self.prefix + text + self.suffix]]
853
+
854
+ def visit_emphasized(self, tree):
855
+ prompt = resolve_tree(tree.children[0], keep_spacing=True)
856
+ try:
857
+ weight = float(tree.children[1]) if len(tree.children) > 1 and isinstance(tree.children[1], lark.Token) and tree.children[1].type == "NUMBER" else 1.1
858
+ except (ValueError, TypeError):
859
+ weight = 1.1
860
+ return [[self.steps, self.prefix + f"({prompt}:{weight})" + self.suffix]]
861
+
862
+ def __call__(self, tree):
863
+ self.schedules = self.visit(tree)
864
+ # deduplicate identical entries while preserving order
865
+ uniq = []
866
+ seen = set()
867
+ for end_step, text in self.schedules or []:
868
+ key = (end_step, text)
869
+ if key not in seen:
870
+ uniq.append([end_step, text])
871
+ seen.add(key)
872
+ return uniq or [[self.steps, self.prefix + resolve_tree(tree, keep_spacing=True) + self.suffix]]
873
+
874
+ def at_step_from_schedule(step: int, schedule: list[list[int | str]]) -> str:
875
+ """
876
+ schedule: [[end_step:int, text:str], ...] — по возрастанию end_step
877
+ Возвращает text, активный на переданном step.
878
+ """
879
+ if not schedule:
880
+ return ""
881
+ for end_step, text in schedule:
882
+ try:
883
+ if step <= int(end_step):
884
+ return text
885
+ except Exception:
886
+ # на всякий случай — если end_step внезапно не число
887
+ continue
888
+ return schedule[-1][1]
889
+
890
def at_step(step: int, prompt_or_schedule, *, steps: int | None = None,
            seed: int | None = 42, use_visitor: bool = True) -> str:
    """
    Convenience wrapper:
      - a ready-made schedule ([[end_step, text], ...]) is queried directly;
      - a prompt string is compiled with get_schedule first (requires *steps*).
    """
    is_schedule = (
        isinstance(prompt_or_schedule, list)
        and bool(prompt_or_schedule)
        and isinstance(prompt_or_schedule[0], list)
    )
    if is_schedule:
        return at_step_from_schedule(step, prompt_or_schedule)

    prompt = str(prompt_or_schedule)
    if steps is None:
        raise ValueError("steps is required when passing a prompt string to at_step(...)")
    compiled = get_schedule(prompt, steps, use_scheduling=True, seed=seed, use_visitor=use_visitor)
    return at_step_from_schedule(step, compiled)
907
+
908
@lru_cache(maxsize=CACHE_SIZE)
def _apply_and(text: str) -> str:
    """Normalize '&' separators (with any surrounding whitespace) to ' and '; memoized."""
    return re.sub(r'\s*&\s*', ' and ', text)
912
+
913
@lru_cache(maxsize=CACHE_SIZE)
def get_schedule(prompt: str, steps: int, use_scheduling: bool, seed: int | None, use_visitor: bool = True):
    """Compile a prompt string into [[end_step, text], ...].

    Tries a cascade of regex "fast paths" for common prompt shapes first
    (owner::: / N[...] / [a:b:N] / explicit step ranges); falls back to the
    Lark grammar + CollectSteps visitor. Results are lru_cache'd, so all
    arguments must stay hashable.
    """
    import re as _re

    # Empty prompt -> safe placeholder token
    if not str(prompt).strip():
        return [[steps, SAFE_EMPTY]]
    # Literal \n and \t -> real newline/tab characters, so that a single
    # m_seq fast path does not swallow two logical sequences at once
    if "\\n" in prompt or "\\t" in prompt:
        prompt = prompt.replace("\\n", "\n").replace("\\t", "\t")
    # --- Fast paths ---

    # TL3: owner::: ... !!!
    m_tl3 = _re.match(r'^\s*([^:\[\]\{\}\(\)]+?):::(.+?)!!!(?:,\s*(.*))?\s*$', prompt, _re.S)
    if m_tl3:
        owner, rest, trailing = m_tl3.groups()

        # collect the comma-separated segments before "!!!"
        parts = [p.strip() for p in rest.split(',') if p.strip()]
        seq_texts = []
        for seg in parts:
            seg = seg.rstrip('!;').strip()
            toks = [t.strip() for t in seg.split('::') if t.strip()]
            if toks:
                label, descs = toks[0], toks[1:]
                s = f"{owner.strip()}: {label}"
                if descs:
                    s += f", {', '.join(descs)}"
                seq_texts.append(s)

        # parse the tail after "!!!," — if a piece is itself a sequence, convert it
        trailing_texts = []
        if trailing:
            for t in trailing.split(','):
                t = t.strip()
                if not t:
                    continue
                # shapes like "weapon::sword!" or terminated with ';'
                mseq = _re.match(r'^\s*([^:\[\]\{\}\(\)]+?)::(.+?)([!;])\s*$', t)
                if mseq:
                    seq_owner, rest2, _term = mseq.groups()
                    descs = [x.strip(' ,~!;') for x in rest2.split('::') if x.strip(' ,~!;')]
                    trailing_texts.append(f"{seq_owner.strip()}: {', '.join(descs)}")
                else:
                    trailing_texts.append(t)

        text = f"{owner.strip()} -> {', '.join(seq_texts)}"
        if trailing_texts:
            text += f", {', '.join(trailing_texts)}"
        return [[steps, _apply_and(text)]]


    # Numbered with square brackets: "3![a|b|c]" or "3[a|b|c]"
    m_num_alt = _re.match(r'^\s*(\d+)\s*([!_])?\s*\[([^\]]+)\]\s*$', str(prompt))
    if m_num_alt:
        import random as _rnd
        qty_txt, mark, inner = m_num_alt.groups()
        quantity = int(qty_txt)
        options = [x.strip() for x in inner.split('|')]
        options = [opt if opt else SAFE_EMPTY for opt in options]
        options_unique = list(dict.fromkeys(options)) or [SAFE_EMPTY]
        if mark:  # distinct picks
            if quantity <= len(options_unique):
                chosen = options_unique[:quantity]
            else:
                # not enough unique options: cycle them to pad up to quantity
                need = quantity - len(options_unique)
                pad = (options_unique * ((need + len(options_unique) - 1)//len(options_unique)))[:need]
                chosen = options_unique + pad
        else:  # with repetitions (seeded when a seed is given)
            rng = _rnd.Random(seed) if seed is not None else _rnd
            chosen = rng.choices(options_unique, k=quantity)
        return [[steps, ', '.join(chosen)]]

    # '[a:b:...:N]' (with optional prefix/suffix text around the brackets)
    m_inner = _re.match(r'^(.*)\[(.*?)\](.*)$', prompt)
    if m_inner and ':' in m_inner.group(2) and '|' not in m_inner.group(2):
        pre, inner, post = m_inner.groups()
        parts = [p.strip() for p in inner.split(':') if p.strip()]
        try:
            if len(parts) >= 2 and _re.fullmatch(r'[-+]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?', parts[-1]):
                boundary_f = float(parts[-1])
                prompts = parts[:-1]
                clamp = lambda x: max(1, min(int(round(x)), steps))
                # boundary <= 1.0 is interpreted as a fraction of total steps
                boundary = clamp(boundary_f if boundary_f > 1.0 else boundary_f * steps)
                if len(prompts) == 1:
                    return [[boundary, f"{pre}{post}"], [steps, f"{pre}{prompts[0]}{post}"]]
                if prompts:
                    b = max(boundary, len(prompts))
                    step_size = b / len(prompts)
                    schedules = []
                    if int(round(step_size)) > 1:
                        schedules.append([1, f"{pre}{post}"])
                    for i, ptxt in enumerate(prompts):
                        start = clamp(int(round(i * step_size)) + 1)
                        end = clamp(int(round((i + 1) * step_size)))
                        if start < end:
                            schedules.append([end, f"{pre}{ptxt}{post}"])
                    # extend the last prompt to the final step if needed
                    if schedules and schedules[-1][0] < steps:
                        schedules.append([steps, f"{pre}{prompts[-1]}{post}"])
                    return [[e, _apply_and(t)] for e, t in schedules] or [[steps, f"{pre}{prompts[-1]}{post}"]]
        except Exception:
            pass

    # '[...]:N' with prefix/suffix text
    m_with_pre = _re.match(r'^(.*)\[(.*?)\]\s*:\s*([-+]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?)\s*(.*)$', prompt)
    if m_with_pre:
        pre, inner, boundary_txt, post = m_with_pre.groups()
        try:
            boundary_f = float(boundary_txt)
            clamp = lambda x: max(1, min(int(round(x)), steps))
            boundary = clamp(boundary_f if boundary_f > 1.0 else boundary_f * steps)
            inner_prompts = [p.strip() for p in inner.split(':') if p.strip()]
            if len(inner_prompts) == 1:
                return [[boundary, f"{pre}{post}"], [steps, f"{pre}{inner_prompts[0]}{post}"]]
        except Exception:
            pass

    # bare '[...]:N'
    m_simple = _re.match(r'^\s*\[(.*?)\]\s*:\s*([-+]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?)\s*$', prompt)
    if m_simple:
        inner, boundary_txt = m_simple.groups()
        try:
            boundary_f = float(boundary_txt)
            clamp = lambda x: max(1, min(int(round(x)), steps))
            boundary = clamp(boundary_f if boundary_f > 1.0 else boundary_f * steps)
            return [[boundary, ""], [steps, inner.strip()]]
        except Exception:
            pass

    # 'owner::...!!' with optional ', trailing'
    m_toplevel = _re.match(r'^\s*([^:\[\]\{\}\(\)]+?)::(.+?)!!(?:,\s*(.*))?\s*$', prompt)
    if m_toplevel:
        owner, rest, trailing = m_toplevel.groups()

        # main part before "!!"
        descriptors = [x.strip(' ,~!;') for x in rest.split('::') if x.strip(' ,~!;')]
        seqs = [f"{owner.strip()}: {', '.join(descriptors)}"] if descriptors else []

        # parse the tail after "!!," — if a piece is itself a sequence, convert it
        trailing_texts = []
        if trailing:
            for t in trailing.split(','):
                t = t.strip()
                if not t:
                    continue
                # shapes like "weapon::sword!" or terminated with ';'
                mseq = _re.match(r'^\s*([^:\[\]\{\}\(\)]+?)::(.+?)([!;])\s*$', t)
                if mseq:
                    seq_owner, rest2, _term = mseq.groups()
                    descs = [
                        x.strip(' ,~!;') for x in rest2.split('::') if x.strip(' ,~!;')
                    ]
                    trailing_texts.append(f"{seq_owner.strip()}: {', '.join(descs)}")
                else:
                    trailing_texts.append(t)

        out = f"{owner.strip()} -> {seqs[0] if seqs else ''}".rstrip()
        if trailing_texts:
            out += f", {', '.join(trailing_texts)}"
        return [[steps, _apply_and(out)]]


    # 'owner::...!' or 'owner::...;' — only when no other grammar markers are present
    m_seq = _re.match(r'^\s*([^:\[\]\{\}\(\)]+?)::(.+?)([!;])\s*$', prompt)
    if m_seq and '|' not in prompt and '[' not in prompt and ']' not in prompt and '(' not in prompt and ')' not in prompt:
        owner, rest, _term = m_seq.groups()
        descriptors = [x.strip(' ,~!;') for x in rest.split('::') if x.strip(' ,~!;')]
        return [[steps, _apply_and(f"{owner.strip()}: {', '.join(descriptors)}")]]

    # Explicit step ranges: "[...]:N a-b,c-d [r]"
    m_ranges = _re.match(r'^\s*\[([^\]]+)\]\s*:\s*([-+]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?)\s+([0-9%\-\s,]+)(?:\s+(r|reverse))?\s*$', prompt)
    if m_ranges:
        inner, steps_txt, ranges_txt, rev = m_ranges.groups()

        # Convert "25%" or "7" into a clamped absolute step number.
        def _to_steps(txt: str) -> int:
            s = txt.strip()
            if s.endswith('%'):
                try:
                    return max(1, min(steps, int(round(float(s[:-1]) / 100.0 * steps))))
                except Exception:
                    return 1
            try:
                return max(1, min(steps, int(round(float(s)))))
            except Exception:
                return 1

        prompts = [p.strip() for p in inner.split(':') if p.strip()]
        if rev:
            prompts = list(reversed(prompts))

        ranges = []
        for part in ranges_txt.split(','):
            if '-' in part:
                a, b = part.split('-', 1)
                ra, rb = _to_steps(a), _to_steps(b)
                if ra < rb:
                    ranges.append((ra, rb))

        schedules = []
        # empty text before the first declared range
        if ranges and ranges[0][0] > 1:
            schedules.append([ranges[0][0] - 1, ""])
        for i, (start, end) in enumerate(ranges[:len(prompts)]):
            schedules.append([min(end, steps), prompts[i]])
        # last prompt carries through to the final step
        if ranges and ranges[-1][1] < steps and prompts:
            schedules.append([steps, prompts[-1]])
        return [[e, _apply_and(t)] for e, t in schedules] or [[steps, _apply_and(inner.strip())]]

    # --- regular Lark parsing ---
    try:
        tree = schedule_parser.parse(prompt)
    except lark.exceptions.LarkError as e:
        logger.warning("Prompt parse failed: '%s' — %s", prompt, e)
        return [[steps, prompt]]

    collector = CollectSteps(steps, use_scheduling=use_scheduling, seed=seed)
    schedules = collector(tree)
    try:
        schedules.sort(key=lambda x: int(x[0]))
    except Exception:
        pass
    if not schedules:
        return [[steps, prompt]]

    if not use_visitor:
        # re-render each boundary via ScheduleTransformer instead of the visitor output
        rebuilt = []
        for end_step, _ in schedules:
            transformer = ScheduleTransformer(total_steps=steps, current_step=end_step, seed=seed)
            text = transformer.transform(tree)
            rebuilt.append([end_step, _apply_and(text)])
        return rebuilt

    return [[e, _apply_and(t)] for e, t in schedules]
1146
+
1147
def get_learned_conditioning_prompt_schedules(
    prompts: list[str],
    base_steps: int,
    hires_steps: int | None = None,
    use_old_scheduling: bool = False,
    seed: int | None = 42,
    use_visitor: bool = True,
):
    """
    Build one schedule per prompt, aligned with *prompts*:
        [
          [[end_step, text], ...],  # prompts[0]
          [[end_step, text], ...],  # prompts[1]
          ...
        ]
    Step count and the scheduling flag follow A1111 compatibility rules.
    """
    # Step count: hires pass overrides base steps unless old scheduling is forced.
    if hires_steps is not None and not use_old_scheduling:
        steps = hires_steps
    else:
        steps = base_steps
    # Scheduling is enabled when there is no hires pass, or old mode was requested.
    use_scheduling = hires_steps is None or use_old_scheduling

    # Per-prompt compilation goes through the cached get_schedule(...).
    return [
        get_schedule(p, steps, use_scheduling, seed, use_visitor=use_visitor)
        for p in prompts
    ]
1174
+
1175
# One conditioning entry: `cond` is active up to and including `end_at_step`.
ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])
1176
+
1177
class SdConditioning(list):
    """A list of prompt strings carrying negative-prompt and target-resolution metadata.

    Metadata falls back to the same attributes on *copy_from* (or on *prompts*
    itself when copy_from is not given).
    """

    def __init__(self, prompts, is_negative_prompt=False, width=None, height=None, copy_from=None):
        super().__init__()
        self.extend(prompts)
        source = prompts if copy_from is None else copy_from
        # explicit arguments win; otherwise inherit from the source object
        self.is_negative_prompt = is_negative_prompt or getattr(source, 'is_negative_prompt', False)
        self.width = width or getattr(source, 'width', None)
        self.height = height or getattr(source, 'height', None)
1186
+
1187
def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps, hires_steps=None, use_old_scheduling=False):
    """Encode each prompt's schedule texts through the model.

    Returns one list of ScheduledPromptConditioning per prompt, aligned with
    *prompts*. Identical prompt strings are encoded only once via a local
    cache. Raises ValueError on an empty schedule.
    """
    res = []
    prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps, hires_steps, use_old_scheduling)
    cache = {}  # prompt string -> already-built cond_schedule

    for prompt, prompt_schedule in zip(prompts, prompt_schedules):
        if not prompt_schedule:
            raise ValueError(f"Empty schedule for prompt '{prompt}'")
        cached = cache.get(prompt, None)
        if cached is not None:
            res.append(cached)
            continue

        # encode every schedule text in one model call
        texts = SdConditioning([x[1] for x in prompt_schedule], copy_from=prompts)
        conds = model.get_learned_conditioning(texts)
        cond_schedule = []

        for i, (end_at_step, _) in enumerate(prompt_schedule):
            # dict-shaped conditioning (e.g. SDXL crossattn/vector) vs plain tensor batch
            if isinstance(conds, dict):
                cond = {k: v[i] for k, v in conds.items()}
            else:
                cond = conds[i]
            cond_schedule.append(ScheduledPromptConditioning(end_at_step, cond))

        cache[prompt] = cond_schedule
        res.append(cond_schedule)

    return res
1215
+
1216
def get_multicond_learned_conditioning(model, prompts: SdConditioning | list[str], steps, hires_steps=None, use_old_scheduling=False):
    """Encode AND-composed prompts into MulticondLearnedConditioning.

    Each prompt is split into weighted subprompts (via get_multicond_prompt_list);
    every subprompt gets its own ScheduledPromptConditioning schedule. Raises
    ValueError on empty schedules, failed encodings, non-integer step boundaries,
    or inconsistent tensor shapes across subprompts.
    """
    steps = hires_steps if hires_steps is not None and not use_old_scheduling else steps
    prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps, hires_steps, use_old_scheduling)
    conds_list, prompt_flat_list, prompt_indexes = get_multicond_prompt_list(prompts)

    cache = {}  # prompt string -> MulticondLearnedConditioning
    res = []
    for prompt, schedule, conds in zip(prompts, prompt_schedules, conds_list):
        if not schedule:
            raise ValueError(f"Empty schedule for prompt '{prompt}'")
        cached = cache.get(prompt, None)
        if cached is not None:
            res.append(cached)
            continue

        texts = SdConditioning([str(x[1]) for x in schedule], copy_from=prompts)
        try:
            model_conds = model.get_learned_conditioning(texts)
        except Exception:
            raise ValueError(f"Conditioning failed for prompt '{prompt}'")

        prompt_schedule = []
        # conds: [(flat_index, weight), ...] for this prompt's subprompts
        for index, weight in conds:
            cond_schedule = []
            for i, (end_at_step, _) in enumerate(schedule):
                try:
                    end_at_step = int(end_at_step)
                except ValueError:
                    raise ValueError(f"Invalid step boundary in schedule for prompt '{prompt}'")
                if isinstance(model_conds, dict):
                    cond = {k: v[i] for k, v in model_conds.items()}
                else:
                    cond = model_conds[i]
                cond_schedule.append(ScheduledPromptConditioning(end_at_step, cond))
            prompt_schedule.append(ComposableScheduledPromptConditioning(cond_schedule, weight))

        # NOTE(review): assumes dict-shaped conditionings always carry a 'crossattn' key — confirm
        shape = model_conds.shape if not isinstance(model_conds, dict) else model_conds['crossattn'].shape
        cache[prompt] = MulticondLearnedConditioning(shape, [prompt_schedule])
        res.append(cache[prompt])

    shapes = {r.shape for r in res}
    if len(shapes) > 1:
        raise ValueError("Inconsistent tensor shapes across subprompts.")

    if len(res) == 1:
        return res[0]
    # merge the batches of several MulticondLearnedConditioning objects into one
    agg_batch = []
    for mc in res:
        agg_batch.extend(mc.batch)
    return MulticondLearnedConditioning(shapes.pop(), agg_batch)
1267
+
1268
# Split on the AND keyword, but not on AND_PERP / AND_SALT / AND_TOPK variants.
re_AND = re.compile(r"\bAND\b(?!_PERP|_SALT|_TOPK)", re.I)
# Capture "text[:weight]" — group 1 is the text, group 2 the optional numeric weight.
re_weight = re.compile(r"^((?:\s|.)*?)(?:\s*:\s*([-+]?(?:\d+\.?\d*|\d*\.\d+)(?:[eE][-+]?\d+)?))?\s*$")
1270
+
1271
def get_multicond_prompt_list(prompts: SdConditioning | list[str]):
    """Split each prompt on AND into weighted subprompts.

    Returns:
        res_indexes: per prompt, a list of (flat_index, weight) pairs
        prompt_flat_list: SdConditioning of the deduplicated subprompt texts
        prompt_indexes: subprompt text -> index in prompt_flat_list
    """
    res_indexes = []
    prompt_indexes = {}
    # keep the metadata of *prompts* but start with an empty text list
    prompt_flat_list = SdConditioning(prompts)
    prompt_flat_list.clear()

    for prompt in prompts:
        subprompts = re_AND.split(prompt)
        indexes = []
        for subprompt in subprompts:
            match = re_weight.search(subprompt)
            text, weight = match.groups() if match is not None else (subprompt, 1.0)

            # Normalize empty subprompts to the SAFE_EMPTY placeholder
            text = (text or "").strip()
            if not text:
                text = SAFE_EMPTY

            try:
                weight = float(weight) if weight is not None else 1.0
            except ValueError:
                weight = 1.0

            # deduplicate identical subprompt texts across the whole batch
            index = prompt_indexes.get(text, None)
            if index is None:
                index = len(prompt_flat_list)
                prompt_flat_list.append(text)
                prompt_indexes[text] = index

            indexes.append((index, weight))
        res_indexes.append(indexes)

    return res_indexes, prompt_flat_list, prompt_indexes
1304
+
1305
+
1306
class ComposableScheduledPromptConditioning:
    """One weighted subprompt of an AND-composition: its schedule plus blend weight."""

    def __init__(self, schedules, weight=1.0):
        # schedules: list[ScheduledPromptConditioning]; weight: blend weight of this subprompt
        self.schedules: list[ScheduledPromptConditioning] = schedules
        self.weight: float = weight
1310
+
1311
class MulticondLearnedConditioning:
    """Container for a batch of composable conditionings sharing one tensor shape."""

    def __init__(self, shape, batch):
        # shape: common conditioning tensor shape; batch: per-image lists of subprompt conds
        self.shape: tuple = shape
        self.batch: list[list[ComposableScheduledPromptConditioning]] = batch
1315
+
1316
class DictWithShape(dict):
    """A dict of tensors that exposes a tensor-like `.shape` (prefers the 'crossattn' entry)."""

    def __init__(self, x, shape=None):
        super().__init__()
        self.update(x)
        # explicit override; when None the shape is derived lazily from the values
        self._shape = shape

    @property
    def shape(self):
        if self._shape is not None:
            return self._shape
        probe = self.get("crossattn")
        if probe is None and self:
            probe = next(iter(self.values()))
        return getattr(probe, "shape", None)
1330
+
1331
def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step):
    """Assemble the batch conditioning tensor(s) active at *current_step*.

    For each schedule in *c*, selects the first entry whose end_at_step covers
    current_step (index 0 if none matches) and copies its cond into a fresh
    zero-initialized batch tensor (or DictWithShape of tensors for dict conds).
    Raises ValueError on an empty or invalid schedule.
    """
    import torch
    if not c or not c[0]:
        raise ValueError("Empty conditioning schedule")
    param = c[0][0].cond
    if param is None:
        raise ValueError("Invalid conditioning parameter")
    is_dict = isinstance(param, dict)
    if is_dict:
        # allocate one zero tensor per key, batched over len(c)
        dict_cond = param
        res = {k: torch.zeros((len(c),) + v.shape, device=getattr(v, "device", "cpu"), dtype=getattr(v, "dtype", torch.float32)) for k, v in dict_cond.items()}
        res = DictWithShape(res, (len(c),) + dict_cond.get('crossattn', next(iter(dict_cond.values()))).shape)
    else:
        res = torch.zeros((len(c),) + param.shape, device=getattr(param, "device", "cpu"), dtype=getattr(param, "dtype", torch.float32))

    for i, cond_schedule in enumerate(c):
        # pick the schedule entry active at current_step (defaults to the first entry)
        target_index = 0
        for current, entry in enumerate(cond_schedule):
            if current_step <= entry.end_at_step:
                target_index = current
                break
        if is_dict:
            for k, v in cond_schedule[target_index].cond.items():
                res[k][i] = v
        else:
            res[i] = cond_schedule[target_index].cond
    return res
1358
+
1359
def stack_conds(tensors):
    """Stack conditioning tensors along a new batch dim.

    Tensors shorter than the longest token count are padded by repeating
    their last row. Fix over the previous version: the input list and its
    tensors are no longer mutated in place — padded copies are built as
    needed, so callers' data stays intact.

    Args:
        tensors: non-empty list of 2-D tensors shaped (tokens_i, dim).
    Returns:
        Tensor shaped (len(tensors), max_tokens, dim).
    Raises:
        ValueError: if *tensors* is empty.
    """
    import torch
    if not tensors:
        raise ValueError("stack_conds: empty tensor list")
    token_count = max(t.shape[0] for t in tensors)
    padded = []
    for t in tensors:
        if t.shape[0] != token_count:
            # repeat the final row to reach the common token count
            last_row = t[-1:]
            filler = last_row.repeat([token_count - t.shape[0], 1])
            t = torch.vstack([t, filler])
        padded.append(t)
    return torch.stack(padded)
1368
+
1369
def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step):
    """Flatten a multicond batch into (conds_list, stacked tensor) at *current_step*.

    For every composable subprompt, selects the schedule entry active at
    current_step, collects its cond into *tensors*, and records
    (tensor_index, weight) pairs per batch item in conds_list. Dict-shaped
    conds are stacked per key into a DictWithShape. Raises ValueError when
    the batch is empty or the first cond is None.
    """
    import torch
    if not c.batch or not c.batch[0]:
        raise ValueError("Empty multicond batch")
    param = c.batch[0][0].schedules[0].cond
    if param is None:
        raise ValueError("Invalid conditioning parameter")
    tensors = []
    conds_list = []
    for composable_prompts in c.batch:
        conds_for_batch = []
        for composable_prompt in composable_prompts:
            # find the schedule entry covering current_step (default: first entry)
            target_index = 0
            for current, entry in enumerate(composable_prompt.schedules):
                try:
                    end_at_step = int(entry.end_at_step)
                except ValueError:
                    # non-numeric boundary: treat it as matching the current step
                    end_at_step = current_step
                if current_step <= end_at_step:
                    target_index = current
                    break
            conds_for_batch.append((len(tensors), composable_prompt.weight))
            tensors.append(composable_prompt.schedules[target_index].cond)
        conds_list.append(conds_for_batch)

    if isinstance(tensors[0], dict):
        # stack each key independently, then wrap with a tensor-like .shape
        keys = list(tensors[0].keys())
        stacked = {k: stack_conds([x[k] for x in tensors]) for k in keys}
        stacked = DictWithShape(stacked, stacked.get('crossattn', next(iter(stacked.values()))).shape)
    else:
        stacked = stack_conds(tensors).to(device=getattr(param, "device", "cpu"), dtype=getattr(param, "dtype", torch.float32))
    return conds_list, stacked
1401
+
1402
# Tokenizer for prompt-attention syntax (verbose regex; consumed by
# parse_prompt_attention below). Alternatives, in priority order: escaped
# brackets and backslashes, opening "(" / "[", a closing ")" carrying an
# explicit ":<number>" weight (the number is capture group 1), bare ")" and
# "]", runs of ordinary text, and a standalone ":".
re_attention = re.compile(r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:\s*([+-]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?)\s*\)|
\)|
]|
[^\\()\[\]:]+|
:
""", re.X)

# Splits prompt text on the word BREAK, consuming surrounding whitespace.
re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
1419
+
1420
def parse_prompt_attention(text):
    """Parse a prompt into ``[text, weight]`` pairs.

    Supports ``(x)`` emphasis (*1.1), ``[x]`` de-emphasis (/1.1), explicit
    ``(x:1.5)`` weights, ``BREAK`` separators (weight -1), and inline
    ``word:weight`` / ``word+N`` / ``word-N`` forms in a second pass.
    NOTE(review): relies on module-level SAFE_EMPTY (fallback token) and the
    optional SUPPRESS_STANDALONE_COLON flag — both defined elsewhere in this
    module; confirm their values when changing behavior here.
    """
    # empty input -> safe fallback token
    if not text or not str(text).strip():
        return [[SAFE_EMPTY, 1.0]]

    # normalize newlines to spaces for stable tokenization
    text = str(text).replace('\r\n', '\n').replace('\r', '\n')
    text = text.replace('\n', ' ').replace('\\n', ' ')

    res = []                       # accumulated [text, weight] pairs
    round_brackets = []            # stack of res-indexes where "(" opened
    square_brackets = []           # stack of res-indexes where "[" opened
    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        # Scale every token emitted since the matching open bracket.
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    pending_colon = False          # a standalone ":" was suppressed; a number may follow
    last_token_index = -1          # index of the last normal token in res

    for m in re_attention.finditer(text):
        text_match = m.group(0)
        wgrp = m.group(1) # number captured from "( ... : <num> )"

        if text_match.startswith('\\'):
            # Escaped character: emit it literally without the backslash.
            res.append([text_match[1:], 1.0])
            last_token_index = len(res) - 1

        elif text_match == '(':
            round_brackets.append(len(res)); pending_colon = False

        elif text_match == '[':
            square_brackets.append(len(res)); pending_colon = False

        elif wgrp is not None and round_brackets:
            # ":<num>)" closes the innermost "(" with an explicit weight.
            # NOTE(review): if float() raised here, the except branch would
            # pop a second bracket; unreachable in practice because wgrp
            # already matched a numeric pattern — confirm before refactoring.
            try:
                multiply_range(round_brackets.pop(), float(wgrp))
            except ValueError:
                multiply_range(round_brackets.pop(), 1.0)
            pending_colon = False

        elif text_match == ')' and round_brackets:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
            pending_colon = False

        elif text_match == ']' and square_brackets:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
            pending_colon = False

        elif text_match == ':' and not round_brackets and not square_brackets:
            # Standalone colon outside any bracket.
            if 'SUPPRESS_STANDALONE_COLON' in globals() and SUPPRESS_STANDALONE_COLON:
                pending_colon = True
            else:
                res.append([':', 1.0])
                last_token_index = len(res) - 1
                pending_colon = False

        else:
            # Plain text chunk.
            chunk = text_match

            if pending_colon and not (round_brackets or square_brackets):
                # Apply the number following ':' to the previous NORMAL token.
                mnum = re.match(r"^\s*([-+]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?)", chunk)
                if mnum:
                    j = last_token_index
                    while j >= 0 and res[j][0] in (':', 'BREAK'):
                        j -= 1
                    if j >= 0:
                        try:
                            res[j][1] = float(mnum.group(1))
                            chunk = chunk[mnum.end():]
                        except ValueError:
                            pass
                pending_colon = False

            # split on BREAK
            parts = re.split(re_break, chunk)
            for i, part in enumerate(parts):
                if i > 0:
                    res.append(["BREAK", -1])
                s = part.strip()
                if not s:
                    continue
                if ('SUPPRESS_STANDALONE_COLON' in globals() and SUPPRESS_STANDALONE_COLON) and s == ":":
                    continue
                res.append([s, 1.0])
                last_token_index = len(res) - 1

    # Unclosed brackets are deliberately ignored (no multiplier applied).
    if round_brackets or square_brackets:
        pass

    if not res:
        return [[SAFE_EMPTY, 1.0]]

    # second pass: word:weight OR word±weight inline forms
    rx_inline = re.compile(
        r'(?:'
        r'(\b[^\s:(){}\[\]]+)\s*:\s*' # g1: word
        r'([-+]?(?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?)' # g2: weight
        r')|(?:'
        r'(\b[^\s:(){}\[\]]+)\s*' # g3: word
        r'([-+](?:\d+(?:\.\d+)?|\.\d+)(?:[eE][+-]?\d+)?)' # g4: weight with sign
        r')'
    )

    new_res = []
    for txt, w in res:
        # Only untouched tokens (weight 1.0) are eligible for inline parsing.
        if txt == "BREAK" or w != 1.0:
            new_res.append([txt, w])
            continue
        pos = 0; changed = False
        for mm in rx_inline.finditer(txt):
            pre = txt[pos:mm.start()]
            if pre and pre.strip().upper() != "AND":
                new_res.append([pre, 1.0])
            if mm.group(1) is not None:
                word, wt = mm.group(1), float(mm.group(2))
            else:
                word, wt = mm.group(3), float(mm.group(4))
            new_res.append([word, wt]); changed = True
            pos = mm.end()
        if changed:
            tail = txt[pos:]
            if tail:
                new_res.append([tail, 1.0])
        else:
            new_res.append([txt, w])
    res = new_res

    # strip a leading "AND "
    norm = []
    for t, w in res:
        if t != "BREAK":
            nt = re.sub(r'^\s*AND\s+', '', t, flags=re.I).strip()
            if nt:
                norm.append([nt, w])
        else:
            norm.append([t, w])
    res = norm

    # merge adjacent entries that share the same weight
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1] and res[i][0] not in (':','BREAK') and res[i + 1][0] not in (':','BREAK'):
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res
1572
+
1573
if __name__ == "__main__":
    # Smoke tests for the prompt-schedule machinery. NOTE(review): relies on
    # get_learned_conditioning_prompt_schedules and GROUP_COMBO_LIMIT defined
    # earlier in this module (not visible here) — verify before running.
    import doctest, random
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
    random.seed(42)  # fixed seed so randomized schedule features are reproducible

    g = lambda p: get_learned_conditioning_prompt_schedules([p], 10)[0]

    # scheduled — basic cases
    assert g("test") == [[10, 'test']]
    assert g("a [b:3]") == [[3, 'a '], [10, 'a b']]
    assert g("[(a:2):3]") == [[3, ''], [10, '(a:2)']]

    # grouped — don't exceed the limit (for large groups just check the length)
    big = "{[a|b|c|d|e|f|g],[h|i|j|k|l|m|n],[o|p|q|r|s|t|u]}"
    res = g(big)
    assert len(res) <= GROUP_COMBO_LIMIT

    # numbered — distinct (! and _)
    # compare with/without repeats via the size of the set of selections
    # (if desired, the string can be parsed with split(', '))

    # alternate_distinct — fixed choice
    g2 = lambda p: get_learned_conditioning_prompt_schedules([p], 6)[0]
    one = g2("[cat|dog|fox]!")
    assert len(set([txt for _, txt in one])) == 1  # the same variant on every step

    print("All integration tests passed!")
sd_simple_kes_v1/simple_kes_v1.py ADDED
@@ -0,0 +1,457 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import logging
3
+ from modules.sd_simple_kes_v1.get_sigmas import get_sigmas_karras, get_sigmas_exponential
4
+ import os
5
+ import yaml
6
+ import random
7
+ from datetime import datetime
8
+ import warnings
9
+ import math
10
+ from typing import Optional
11
+ import json
12
+ from modules.sd_simple_kes_v1.validate_config import validate_config
13
+
14
+
15
+
16
def simple_kes_scheduler_v1(n: int, sigma_min: float, sigma_max: float, device: torch.device) -> torch.Tensor:
    """Build a SimpleKEScheduler for the given step count and sigma range,
    then return the blended sigma schedule it produces."""
    return SimpleKEScheduler(n=n, sigma_min=sigma_min, sigma_max=sigma_max, device=device)()
19
+
20
class SharedLogger:
    """In-memory message collector, active only when debug mode is on."""

    def __init__(self, debug=False):
        # When debug is False, log() silently discards messages.
        self.debug = debug
        self.log_buffer = []

    def log(self, message):
        """Record *message* in the buffer if debugging is enabled."""
        if not self.debug:
            return
        self.log_buffer.append(message)

    def dump(self):
        """Return every recorded message joined by newlines."""
        return "\n".join(self.log_buffer)
31
+
32
+
33
class SimpleKEScheduler:
    """
    SimpleKEScheduler
    ------------------
    A hybrid scheduler that combines Karras-style sigma sampling
    with exponential decay and blending controls. Supports parameterized
    customization for use in advanced diffusion pipelines.

    Parameters:
    - n (int): Number of inference steps.
    - sigma_min / sigma_max (Optional[float]): Sigma range overrides.
    - device (torch.device or str): Target device (e.g. 'cuda').
    - logger: Optional SharedLogger-compatible object; one is created when omitted.
    - **kwargs: Per-setting overrides applied on top of the YAML config.

    Usage:
        scheduler = SimpleKEScheduler(n=30, device='cuda')
        sigmas = scheduler()
    """

    def __init__(self, n: int, sigma_min: Optional[float] = None, sigma_max: Optional[float] = None, device: torch.device = "cuda", logger=None, **kwargs) -> None:
        # FIX: was annotated "-> torch.Tensor"; __init__ always returns None.
        self.steps = n if n is not None else 25
        self.device = torch.device(device if isinstance(device, str) else device)
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max

        # Accepted spellings for the randomization-type setting.
        self.RANDOMIZATION_TYPE_ALIASES = {
            'symmetric': 'symmetric', 'sym': 'symmetric', 's': 'symmetric',
            'asymmetric': 'asymmetric', 'assym': 'asymmetric', 'a': 'asymmetric',
            'logarithmic': 'logarithmic', 'log': 'logarithmic', 'l': 'logarithmic',
            'exponential': 'exponential', 'exp': 'exponential', 'e': 'exponential'
        }

        # Temporarily hold overrides from kwargs.
        self._overrides = kwargs.copy()
        self.config_path = os.path.abspath(os.path.normpath(os.path.join("modules", "sd_simple_kes_v1", "kes_config", "default_config.yaml")))
        self.config_data = self.load_config()
        self.config = self.config_data.copy()
        self.settings = self.config.copy()

        # Apply overrides from kwargs if present (only known settings).
        for k, v in self._overrides.items():
            if k in self.settings:
                self.settings[k] = v
                setattr(self, k, v)

        self.debug = self.settings.get("debug", False)
        # FIX: the logger argument was silently discarded before; honor it.
        self.logger = logger if logger is not None else SharedLogger(debug=self.debug)
        self.log = self.logger.log
        validate_config(self.config, logger=self.logger)

        # Mirror every setting as an instance attribute for convenient access.
        for key, value in self.settings.items():
            setattr(self, key, value)

        if self.settings.get("global_randomize", False):
            self.apply_global_randomization()
        self.settings = self.settings.copy()

        # Settings that may be re-randomized on every compute_sigmas() call.
        self.re_randomizable_keys = [
            "sigma_min", "sigma_max", "start_blend", "end_blend", "sharpness",
            "early_stopping_threshold",
            "initial_step_size", "final_step_size",
            "initial_noise_scale", "final_noise_scale",
            "smooth_blend_factor", "step_size_factor", "noise_scale_factor", "rho"
        ]
        for key in self.re_randomizable_keys:
            value = self.settings.get(key)
            if value is None:
                raise KeyError(f"[KEScheduler] Missing required setting: {key}")
            setattr(self, key, value)

    def __call__(self):
        """Compute the blended sigma schedule, sanity-check it, persist the
        per-generation log, and return the schedule tensor."""
        sigmas = self.compute_sigmas()
        if torch.isnan(sigmas).any():
            raise ValueError("[SimpleKEScheduler] NaN detected in sigmas")
        if torch.isinf(sigmas).any():
            raise ValueError("[SimpleKEScheduler] Inf detected in sigmas")
        if (sigmas <= 0).all():
            raise ValueError("[SimpleKEScheduler] All sigma values are <= 0")
        if (sigmas > 1000).all():
            raise ValueError("[SimpleKEScheduler] Sigma values are extremely large — might explode the model")

        self.save_generation_settings()
        return sigmas

    def save_generation_settings(self):
        """Write the buffered log lines to a timestamped file and clear the buffer."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        folder = os.path.join("modules", "sd_simple_kes_v1", "image_generation_data")
        os.makedirs(folder, exist_ok=True)
        filename = os.path.join(folder, f"generation_log_{timestamp}.txt")

        with open(filename, "w") as f:
            for line in self.logger.log_buffer:  # use the logger's buffer
                f.write(f"{line}\n")

        # FIX: previously logged the literal "(unknown)" instead of the path.
        self.log(f"[SimpleKEScheduler] Generation log saved to {filename}")
        self.logger.log_buffer.clear()

    def load_config(self):
        """Load the YAML config from self.config_path; return {} on failure."""
        try:
            with open(self.config_path, 'r') as f:
                user_config = yaml.safe_load(f)
            return user_config
        except FileNotFoundError:
            self.log(f"Config file not found: {self.config_path}. Using empty config.")
            return {}
        except yaml.YAMLError as e:
            self.log(f"Error loading config file: {e}")
            return {}

    def apply_global_randomization(self):
        """Force randomization for all eligible settings by enabling _rand flags
        and re-randomizing each base value."""
        # Pass 1: enable the _rand flag for every key that has a rand range,
        # and remember the base keys. FIX: the old code's second pass used the
        # stale loop variable from pass 1, so only one (arbitrary) key — or a
        # nonexistent one — was ever re-randomized.
        base_keys = set()
        for key in list(self.settings.keys()):
            if key.endswith("_rand_min") or key.endswith("_rand_max"):
                base_key = key.rsplit("_rand_", 1)[0]
                self.settings[f"{base_key}_rand"] = True
                base_keys.add(base_key)

        # Pass 2: re-randomize each eligible base value.
        for base_key in sorted(base_keys):
            if base_key not in self.settings:
                raise KeyError(f"[apply_global_randomization] Missing required key: {base_key}")
            default_val = self.settings[base_key]
            randomized_val = self.get_random_or_default(base_key, default_val)
            self.settings[base_key] = randomized_val
            setattr(self, base_key, randomized_val)

    def get_randomization_type(self, key_prefix):
        """Return the canonical randomization type for a key, defaulting to 'asymmetric'."""
        randomization_type_raw = self.settings.get(f'{key_prefix}_randomization_type', 'asymmetric')
        randomization_type = self.RANDOMIZATION_TYPE_ALIASES.get(randomization_type_raw.lower(), 'asymmetric')
        return randomization_type

    def get_randomization_percent(self, key_prefix):
        """Return the randomization percent for a key, defaulting to 0.2."""
        return self.settings.get(f'{key_prefix}_randomization_percent', 0.2)

    def get_random_between_min_max(self, key_prefix, default_value):
        """Pick a uniform random value between <key>_rand_min and <key>_rand_max
        when <key>_rand is True; otherwise return default_value."""
        randomize_flag = self.settings.get(f'{key_prefix}_rand', False)

        if randomize_flag:
            rand_min = self.settings.get(f'{key_prefix}_rand_min', default_value)
            rand_max = self.settings.get(f'{key_prefix}_rand_max', default_value)

            if rand_min == rand_max:
                self.log(f"[Random Range] {key_prefix}: min and max are equal ({rand_min}). Using single value.")
                return rand_min

            value = random.uniform(rand_min, rand_max)
            self.log(f"[Random Range] {key_prefix}: Picked random value {value} between {rand_min} and {rand_max}")
            return value
        else:
            self.log(f"[Random Range] {key_prefix}: Randomization is OFF. Using base value {default_value}")
            return default_value

    def get_random_by_type(self, key_prefix, default_value):
        """Randomize default_value according to the configured randomization type
        (symmetric / asymmetric / logarithmic / exponential)."""
        randomization_enabled = self.settings.get(f'{key_prefix}_enable_randomization_type', False)

        if not randomization_enabled:
            self.log(f"[Randomization Type] {key_prefix}: Randomization type is OFF. Using base value {default_value}")
            return default_value

        randomization_type = self.get_randomization_type(key_prefix)
        randomization_percent = self.get_randomization_percent(key_prefix)

        if randomization_type == 'symmetric':
            rand_min = default_value * (1 - randomization_percent)
            rand_max = default_value * (1 + randomization_percent)
            self.log(f"[Symmetric Randomization] {key_prefix}: Range {rand_min} to {rand_max}")

        elif randomization_type == 'asymmetric':
            # Skewed upward: twice the percent headroom above the base value.
            rand_min = default_value * (1 - randomization_percent)
            rand_max = default_value * (1 + (randomization_percent * 2))
            self.log(f"[Asymmetric Randomization] {key_prefix}: Range {rand_min} to {rand_max}")

        elif randomization_type == 'logarithmic':
            # NOTE(review): math.log fails for non-positive bounds, i.e. when
            # default_value <= 0 or percent >= 1 — confirm config constraints.
            rand_min = math.log(default_value * (1 - randomization_percent))
            rand_max = math.log(default_value * (1 + randomization_percent))
            value = math.exp(random.uniform(rand_min, rand_max))
            self.log(f"[Logarithmic Randomization] {key_prefix}: Log-space randomization resulted in {value}")
            return value

        elif randomization_type == 'exponential':
            rand_min = default_value * (1 - randomization_percent)
            rand_max = default_value * (1 + randomization_percent)
            base_value = random.uniform(rand_min, rand_max)
            value = math.exp(base_value)
            self.log(f"[Exponential Randomization] {key_prefix}: Randomized exponential value {value}")
            return value

        else:
            self.log(f"[Randomization Type] {key_prefix}: Invalid randomization type {randomization_type}. Using base value.")
            return default_value

        value = random.uniform(rand_min, rand_max)
        self.log(f"[Randomization Type] {key_prefix}: Randomized value {value}")
        return value

    def get_random_or_default(self, key_prefix, default_value):
        """
        Select the randomization method based on active flags:
        - both enabled -> randomization type takes priority,
        - only one enabled -> apply that one,
        - neither -> return default_value unchanged.
        """
        rand_type_enabled = self.settings.get(f'{key_prefix}_enable_randomization_type', False)
        min_max_enabled = self.settings.get(f'{key_prefix}_rand', False)

        if rand_type_enabled and min_max_enabled:
            self.log(f"[Randomization Policy] Both min/max and randomization type enabled for {key_prefix}. System will prioritize randomization type.")
            result_value = self.get_random_by_type(key_prefix, default_value)

        elif rand_type_enabled:
            result_value = self.get_random_by_type(key_prefix, default_value)
            self.log(f"[Randomization] {key_prefix}: Applied randomization type. Final value: {result_value}")

        elif min_max_enabled:
            result_value = self.get_random_between_min_max(key_prefix, default_value)
            self.log(f"[Randomization] {key_prefix}: Applied min/max randomization. Final value: {result_value}")

        else:
            result_value = default_value
            self.log(f"[Randomization] {key_prefix}: No randomization applied. Using default value: {result_value}")

        return result_value

    def start_sigmas(self, steps, sigma_min, sigma_max, device):
        """Ensure self.sigma_min < self.sigma_max, correcting sigma_min with a
        random factor when the ordering is violated.
        NOTE(review): the steps/sigma_min/sigma_max/device parameters are
        accepted but unused (the method works on self.*); kept for interface
        compatibility."""
        if self.sigma_min >= self.sigma_max:
            correction_factor = random.uniform(0.01, 0.99)
            old_sigma_min = self.sigma_min
            self.sigma_min = self.sigma_max * correction_factor
            self.log(f"[Correction] sigma_min ({old_sigma_min}) was >= sigma_max ({self.sigma_max}). Adjusted sigma_min to {self.sigma_min} using correction factor {correction_factor}.")

        self.log(f"Final sigmas: sigma_min={self.sigma_min}, sigma_max={self.sigma_max}")
        return steps, self.sigma_min, self.sigma_max, device

    def compute_sigmas(self) -> torch.Tensor:
        """Blend Karras and exponential sigma schedules with adaptive step-size,
        noise-scale and sharpening controls.

        Returns:
            torch.Tensor: the blended sigma sequence (possibly truncated when
            the early-stopping convergence criterion is met).

        Raises:
            ValueError: when steps is missing, sigma_auto_mode is invalid, or
            the result contains NaN/Inf.
        """
        if self.steps is None:
            raise ValueError("Number of steps must be provided.")
        if isinstance(self.device, str):
            self.device = torch.device(self.device)

        # Re-randomize each eligible setting (no-op per key unless enabled).
        acceptable_keys = [
            "sigma_min", "sigma_max", "start_blend", "end_blend", "sharpness",
            "early_stopping_threshold", "initial_step_size",
            "final_step_size", "initial_noise_scale", "final_noise_scale",
            "smooth_blend_factor", "step_size_factor", "noise_scale_factor", "rho"
        ]
        for key in acceptable_keys:
            default_val = self.settings[key]
            value = self.get_random_or_default(key, default_val)
            setattr(self, key, value)

        # Optionally derive one end of the sigma range from the other.
        if self.sigma_auto_enabled:
            if self.sigma_auto_mode not in ["sigma_min", "sigma_max"]:
                raise ValueError(f"[Config Error] Invalid sigma_auto_mode: {self.sigma_auto_mode}. Must be 'sigma_min' or 'sigma_max'.")

            if self.sigma_auto_mode == "sigma_min":
                self.sigma_min = self.sigma_max / self.sigma_scale_factor
                self.log(f"[Auto Sigma Min] sigma_min set to {self.sigma_min} using scale factor {self.sigma_scale_factor}")

            elif self.sigma_auto_mode == "sigma_max":
                self.sigma_max = self.sigma_min * self.sigma_scale_factor
                self.log(f"[Auto Sigma Max] sigma_max set to {self.sigma_max} using scale factor {self.sigma_scale_factor}")

        # Always apply min_threshold AFTER auto scaling.
        self.min_threshold = random.uniform(1e-5, 5e-5)

        if self.sigma_min < self.min_threshold:
            self.log(f"[Threshold Enforcement] sigma_min was too low: {self.sigma_min} < min_threshold {self.min_threshold}")
            self.sigma_min = self.min_threshold

        if self.sigma_max < self.min_threshold:
            self.log(f"[Threshold Enforcement] sigma_max was too low: {self.sigma_max} < min_threshold {self.min_threshold}")
            self.sigma_max = self.min_threshold

        # Log-linear reference ramp used to re-derive the effective range.
        start = math.log(self.sigma_max)
        end = math.log(self.sigma_min)
        self.sigmas = torch.linspace(start, end, self.steps, device=self.device).exp()

        # Ensure sigmas contain valid values before using them.
        if torch.any(self.sigmas > 0):
            self.sigma_min, self.sigma_max = self.sigmas[self.sigmas > 0].min(), self.sigmas.max()
        else:
            # All sigmas invalid: fall back to the enforced minimum threshold.
            self.sigma_min, self.sigma_max = self.min_threshold, self.min_threshold
            self.log(f"Debugging Warning: No positive sigma values found! Setting fallback sigma_min={self.sigma_min}, sigma_max={self.sigma_max}")

        self.log(f"Using device: {self.device}")
        # Generate sigma sequences using Karras and Exponential methods.
        self.start_sigmas(steps=self.steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max, device=self.device)
        self.sigmas_karras = get_sigmas_karras(n=self.steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max, rho=self.rho, device=self.device)
        self.sigmas_exponential = get_sigmas_exponential(n=self.steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max, device=self.device)

        if self.sigmas_karras is None:
            raise ValueError(f"Sigmas Karras:{self.sigmas_karras} Failed to generate or assign sigmas correctly.")
        if self.sigmas_exponential is None:
            raise ValueError(f"Sigmas Exponential: {self.sigmas_exponential} Failed to generate or assign sigmas correctly.")

        # FIX: the previous version overwrote both freshly generated sequences
        # with torch.zeros(...) here (plus a dead try/except pass), which
        # discarded the schedules and yielded an all-zero result that the
        # __call__ sanity checks then rejected. Both statements are removed.

        # Match lengths of the two sigma sequences.
        target_length = min(len(self.sigmas_karras), len(self.sigmas_exponential))
        self.sigmas_karras = self.sigmas_karras[:target_length]
        self.sigmas_exponential = self.sigmas_exponential[:target_length]

        self.log(f"Generated sigma sequences. Karras: {self.sigmas_karras}, Exponential: {self.sigmas_exponential}")

        # Expand sigma_max slightly to account for smoother transitions.
        self.sigma_max = self.sigma_max * 1.1

        # Define progress and initialize blend factor.
        self.progress = torch.linspace(0, 1, len(self.sigmas_karras)).to(self.device)
        meaningful_steps = len(self.progress) - 1  # adjust for appended zero step
        self.log(f"[Progress Initialized] Created progress tensor with {meaningful_steps} steps (excluding terminal step) on device: {self.device}")

        sigs = torch.zeros_like(self.sigmas_karras).to(self.device)
        self.log(f"[Initialization] Pre-allocated empty sigma sequence with shape: torch.Size([{meaningful_steps}]) on device: {self.device}")

        # Defensive padding of the shorter sequence with its last value (both
        # were already truncated to the same length above, so normally a no-op).
        if len(self.sigmas_karras) < len(self.sigmas_exponential):
            # FIX: was ".to(self.sigmas_karras.self.device)" — AttributeError.
            padding_karras = torch.full((len(self.sigmas_exponential) - len(self.sigmas_karras),), self.sigmas_karras[-1]).to(self.sigmas_karras.device)
            self.sigmas_karras = torch.cat([self.sigmas_karras, padding_karras])
        elif len(self.sigmas_karras) > len(self.sigmas_exponential):
            padding_exponential = torch.full((len(self.sigmas_karras) - len(self.sigmas_exponential),), self.sigmas_exponential[-1]).to(self.sigmas_exponential.device)
            self.sigmas_exponential = torch.cat([self.sigmas_exponential, padding_exponential])

        # Per-step adaptive blend of the two schedules.
        for i in range(len(self.sigmas_karras)):
            # Adaptive step size and blend factor calculations.
            self.step_size = self.initial_step_size * (1 - self.progress[i]) + self.final_step_size * self.progress[i] * self.step_size_factor  # factor compensates for over-smoothing
            self.dynamic_blend_factor = self.start_blend * (1 - self.progress[i]) + self.end_blend * self.progress[i]
            self.noise_scale = self.initial_noise_scale * (1 - self.progress[i]) + self.final_noise_scale * self.progress[i] * self.noise_scale_factor
            smooth_blend = torch.sigmoid((self.dynamic_blend_factor - 0.5) * self.smooth_blend_factor)

            # Compute blended sigma values, then apply step size and noise scaling.
            blended_sigma = self.sigmas_karras[i] * (1 - smooth_blend) + self.sigmas_exponential[i] * smooth_blend
            sigs[i] = blended_sigma * self.step_size * self.noise_scale

        # Optional: adaptive sharpening of sigmas near the minimum.
        self.sharpen_mask = torch.where(sigs < self.sigma_min * 1.5, self.sharpness, 1.0).to(self.device)
        sharpen_indices = torch.where(self.sharpen_mask < 1.0)[0].tolist()
        self.log(f"[Sharpen Mask] Sharpening applied at steps: {sharpen_indices}")
        sigs = sigs * self.sharpen_mask

        # Early stop criterion based on sigma convergence.
        change = torch.abs(sigs[1:] - sigs[:-1])
        if torch.all(change < self.early_stopping_threshold):
            self.log("Early stopping criteria met.")
            return sigs[:len(change) + 1].to(self.device)

        if torch.isnan(sigs).any() or torch.isinf(sigs).any():
            raise ValueError("Invalid sigma values detected (NaN or Inf).")

        return sigs.to(self.device)
sd_simple_kes_v1/validate_config.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Dict, Any
2
+
3
+
4
# Canonical names for the accepted randomization-type spellings; short
# aliases map onto the canonical form.
RANDOMIZATION_TYPE_ALIASES = {
    'symmetric': 'symmetric', 'sym': 'symmetric', 's': 'symmetric',
    'asymmetric': 'asymmetric', 'assym': 'asymmetric', 'a': 'asymmetric',
    'off': 'off', 'none': 'off'
}

# Fallbacks for configs that omit randomization settings.
# NOTE(review): validate_config hardcodes 'asymmetric' / 0.2 rather than
# reading these constants — confirm they are meant to stay in sync.
DEFAULT_RANDOMIZATION_TYPE = 'asymmetric'
DEFAULT_RANDOMIZATION_PERCENT = 0.2
12
+
13
+
14
def validate_config(config: Dict[str, Any], logger: Optional[Any] = None) -> Dict[str, Any]:
    """Validate and normalize a scheduler configuration dict.

    Returns a copy of *config* where:
    - negative numeric base values are flipped to their absolute value,
    - non-boolean ``*_rand`` flags are forced to ``False``,
    - missing ``*_randomization_type`` / ``*_randomization_percent`` entries
      are filled with the defaults ('asymmetric' / 0.2), and
    - missing ``*_rand_min`` / ``*_rand_max`` bounds are derived from the
      (corrected) base value and the randomization percent.

    Messages go to ``logger.log`` when a logger is supplied, else to stdout.
    The input dict is never mutated.
    """
    updated_config = config.copy()

    def log(message):
        if logger:
            logger.log(message)
        else:
            print(message)

    # Negative base values are treated as sign typos and flipped positive.
    # Randomization flags/ranges/percents are skipped: their values (including
    # booleans, which are ints) must pass through untouched.
    for key, value in config.items():
        if isinstance(value, (int, float)) and not key.endswith(('_rand', '_rand_min', '_rand_max', '_randomization_percent')):
            if value < 0:
                updated_config[key] = abs(value)
                log(f"[Config Correction] {key} was negative. Converted to absolute value: {updated_config[key]}")

    # Fill in any missing randomization companions for each *_rand flag.
    for key, value in config.items():
        if key.endswith('_rand'):
            # FIX: strip only the trailing '_rand' suffix. The previous
            # key.replace('_rand', '') also removed interior occurrences
            # (e.g. 'x_rand_y_rand' -> 'x_y' instead of 'x_rand_y').
            base_key = key[: -len('_rand')]

            if not isinstance(value, bool):
                updated_config[key] = False
                log(f"[Config Correction] {key} was not boolean. Set to False.")

            type_key = f"{base_key}_randomization_type"
            if type_key not in config:
                updated_config[type_key] = 'asymmetric'
                log(f"[Config Correction] {type_key} missing. Set to 'asymmetric'.")

            percent_key = f"{base_key}_randomization_percent"
            if percent_key not in config:
                updated_config[percent_key] = 0.2
                log(f"[Config Correction] {percent_key} missing. Set to 0.2.")

            min_key = f"{base_key}_rand_min"
            max_key = f"{base_key}_rand_max"
            if base_key in config:
                base_value = updated_config[base_key]  # abs-corrected value
                percent = updated_config[percent_key]

                if min_key not in config:
                    updated_config[min_key] = base_value * (1 - percent)
                    log(f"[Config Correction] {min_key} missing. Auto-calculated from base.")

                if max_key not in config:
                    updated_config[max_key] = base_value * (1 + percent)
                    log(f"[Config Correction] {max_key} missing. Auto-calculated from base.")

    log("[Config Validation] Config validated and missing values filled successfully.")
    return updated_config
65
+