naumnaum committed on
Commit
08d85f2
·
verified ·
1 Parent(s): 964f335

Upload luba_v2_19-09/config_lora-20240919-101139.toml with huggingface_hub

Browse files
luba_v2_19-09/config_lora-20240919-101139.toml ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# LoRA training configuration (sd-scripts-style keys) for a FLUX.1 model
# (pretrained_model_name_or_path = flux1-dev, network_module = networks.lora_flux).
# All paths assume a /workspace layout — adjust for the local environment.
# NOTE(review): presumably consumed by kohya-ss sd-scripts' flux LoRA trainer — confirm.

ae = "/workspace/models/ae.safetensors"
bucket_no_upscale = true
bucket_reso_steps = 64
cache_latents = true
cache_latents_to_disk = true
cache_text_encoder_outputs = true
cache_text_encoder_outputs_to_disk = true
caption_extension = ".txt"
clip_l = "/workspace/models/clip_l.safetensors"
clip_skip = 1
discrete_flow_shift = 3.0
dynamo_backend = "no"
epoch = 50
full_bf16 = true
gradient_accumulation_steps = 1
gradient_checkpointing = true
guidance_scale = 1.0
highvram = true
huber_c = 0.1
huber_schedule = "snr"
logging_dir = "/workspace/train_folder/luba-19-09-v1/log"
loss_type = "l2"
lr_scheduler = "constant"
lr_scheduler_args = []
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1
max_bucket_reso = 2048
max_data_loader_n_workers = 0
max_timestep = 1000
# max_train_steps caps training; with epoch = 50 the earlier limit wins at runtime.
max_train_steps = 1500
mem_eff_save = true
min_bucket_reso = 256
mixed_precision = "bf16"
model_prediction_type = "raw"
network_alpha = 64
network_args = ["train_double_block_indices=all", "train_single_block_indices=all"]
network_dim = 64
network_module = "networks.lora_flux"
network_train_unet_only = true
noise_offset_type = "Original"
optimizer_args = ["scale_parameter=False", "relative_step=False", "warmup_init=False", "weight_decay=0.01"]
optimizer_type = "Adafactor"
output_dir = "/workspace/train_folder/luba-19-09-v1/model"
output_name = "19_09_unrlmmdl_luba_v1"
pretrained_model_name_or_path = "/workspace/models/flux1-dev.safetensors"
prior_loss_weight = 1
# "width,height" string — this consumer expects a string, not an array.
resolution = "512,512"
sample_prompts = "/workspace/train_folder/luba-19-09-v1/model/sample/prompt.txt"
sample_sampler = "euler_a"
save_every_n_epochs = 20
save_model_as = "safetensors"
save_precision = "float"
sdpa = true
t5xxl = "/workspace/models/t5xxl_fp16.safetensors"
t5xxl_max_token_length = 512
text_encoder_lr = []
timestep_sampling = "sigmoid"
train_batch_size = 1
train_data_dir = "/workspace/train_folder/luba-19-09-v1/img"
unet_lr = 0.0001
wandb_run_name = "19_09_unrlmmdl_luba_v1"