DraconicDragon committed on
Commit
c2d4785
·
verified ·
1 Parent(s): 17d6e28

Upload 2 files

Browse files
ChenkinRF-0.3_e4/config.toml ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ persistent_data_loader_workers = true
2
+ pretrained_model_name_or_path = "G:/ImgGen/xl/noob/Chenkin-RF-0.3-000004-final.safetensors"
3
+ full_bf16 = true
4
+ mixed_precision = "bf16"
5
+ gradient_checkpointing = true
6
+ gradient_accumulation_steps = 6
7
+ seed = 80085
8
+ max_data_loader_n_workers = 1
9
+ max_token_length = 225
10
+ prior_loss_weight = 1.0
11
+ xformers = true
12
+ max_train_epochs = 26
13
+ cache_latents = true
14
+ training_comment = "ck rf ep4 final ylw d7 with trigger"
15
+ protected_tags_file = "E:/sd-scripts/protected_tags/ylw_d7-trigger.txt"
16
+ vae_batch_size = 4
17
+ flow_model = true
18
+ flow_use_ot = true
19
+ flow_timestep_distribution = "logit_normal"
20
+ flow_uniform_static_ratio = 2.0
21
+ flow_logit_mean = -0.2
22
+ flow_logit_std = 1.5
23
+ network_dim = 32
24
+ network_alpha = 32.0
25
+ min_timestep = 0
26
+ max_timestep = 1000
27
+ network_train_unet_only = true
28
+ network_module = "lycoris.kohya"
29
+ network_args = ["conv_dim=32", "conv_alpha=32.0", "algo=locon"]
30
+ optimizer_type = "AdamW8bit"
31
+ lr_scheduler = "constant_with_warmup"
32
+ loss_type = "l2"
33
+ learning_rate = 0.00025
34
+ unet_lr = 0.00025
35
+ max_grad_norm = 0.0
36
+ optimizer_args = ["weight_decay=0.01", "betas=(0.9, 0.999)"]
37
+ output_dir = "E:/LoRA_Easy_Training_Scripts/outputs"
38
+ output_name = "YLW_d7_CKN-RF4Fin_v1_edm2"
39
+ save_precision = "bf16"
40
+ save_model_as = "safetensors"
41
+ save_last_n_epochs = 4
42
+ tag_occurrence = true
43
+ save_every_n_epochs = 1
44
+ save_toml = true
45
+ save_toml_location = "E:/LoRA_Easy_Training_Scripts/train_configs"
46
+ logging_dir = "E:/LoRA_Easy_Training_Scripts/tb_logs"
47
+ log_with = "tensorboard"
48
+ edm2_loss_weighting = true
49
+ edm2_loss_weighting_optimizer = "LoraEasyCustomOptimizer.fmarscrop.FMARSCropV2ExMachina"
50
+ edm2_loss_weighting_optimizer_lr = "5e-3"
51
+ edm2_loss_weighting_optimizer_args = "{'update_strategy':'cautious', 'gamma':0.0, 'betas':(0.99,0.9999,0.999), 'adaptive_clip':0}"
52
+ edm2_loss_weighting_lr_scheduler = true
53
+ edm2_loss_weighting_lr_scheduler_warmup_percent = 0.1
54
+ edm2_loss_weighting_lr_scheduler_constant_percent = 0.9
55
+ edm2_loss_weighting_generate_graph = true
56
+ edm2_loss_weighting_generate_graph_output_dir = "E:/LoRA_Easy_Training_Scripts/outputs/ckrf4fin_ryusho_edm2"
57
+ edm2_loss_weighting_generate_graph_every_x_steps = 50
58
+ edm2_loss_weighting_generate_graph_y_limit = 100
59
+ edm2_loss_weighting_initial_weights = ""
60
+ edm2_loss_weighting_num_channels = 448
61
+ edm2_loss_weighting_importance_weighting_max = 10
62
+ lr_warmup_steps = 13
ChenkinRF-0.3_e4/dataset.toml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [general]
2
+ resolution = 1024
3
+ batch_size = 4
4
+ enable_bucket = true
5
+ min_bucket_reso = 256
6
+ max_bucket_reso = 3072
7
+ bucket_reso_steps = 64
8
+
9
+ [[datasets]]
10
+
11
+ [[datasets.subsets]]
12
+ caption_dropout_rate = 0.1
13
+ caption_extension = ".txt"
14
+ caption_tag_dropout_rate = 0.12
15
+ image_dir = "E:/Datasets/yao_liao_wang-v7_trigger"
16
+ num_repeats = 2
17
+ random_crop_padding_percent = 0.06
18
+ shuffle_caption = true