Vikitai committed on
Commit
3a01647
·
verified ·
1 Parent(s): 874cc96

Upload config_lora-20250503-051050.toml with huggingface_hub

Browse files
Files changed (1) hide show
  1. config_lora-20250503-051050.toml +63 -0
config_lora-20250503-051050.toml ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# LoRA training configuration (kohya sd-scripts style) for a FLUX.1 base model —
# grounded by network_module = "networks.lora_flux" and the flux1-dev checkpoint path.
# Keys are kept in their original (alphabetical) order; values unchanged.

ae = "/workspace/ae.safetensors"
apply_t5_attn_mask = true
bucket_no_upscale = true
bucket_reso_steps = 64
cache_latents = true
cache_latents_to_disk = true
cache_text_encoder_outputs = true
caption_extension = ".txt"
clip_l = "/workspace/clip_l.safetensors"
clip_skip = 1
discrete_flow_shift = 3.0
dynamo_backend = "no"
enable_bucket = true
epoch = 10
gradient_accumulation_steps = 1
gradient_checkpointing = true
guidance_scale = 3.5
huber_c = 0.1
huber_scale = 1
huber_schedule = "snr"
logging_dir = "/workspace/formatted_training/log"
loss_type = "l2"
lr_scheduler = "constant"
lr_scheduler_args = []
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1
max_bucket_reso = 2048
max_data_loader_n_workers = 0
max_grad_norm = 1
max_timestep = 1000
max_train_steps = 100
mem_eff_attn = true
mem_eff_save = true
min_bucket_reso = 256
mixed_precision = "fp16"
model_prediction_type = "raw"
network_alpha = 128
network_args = [ "train_double_block_indices=all", "train_single_block_indices=all",]
network_dim = 128
network_module = "networks.lora_flux"
network_train_unet_only = true
noise_offset_type = "Original"
optimizer_args = []
optimizer_type = "Adafactor"
output_dir = "/workspace/formatted_training/model"
output_name = "mi_modelo_lora_v1"
pretrained_model_name_or_path = "/workspace/flux1-dev.safetensors"
prior_loss_weight = 1
# "width,height" string as expected by sd-scripts (not a native TOML array).
resolution = "512,640"
sample_prompts = "/workspace/formatted_training/model/sample/prompt.txt"
sample_sampler = "euler_a"
save_every_n_epochs = 5
save_model_as = "safetensors"
save_precision = "fp16"
t5xxl = "/workspace/t5xxl_fp16.safetensors"
t5xxl_max_token_length = 512
text_encoder_lr = []
timestep_sampling = "sigmoid"
train_batch_size = 2
train_data_dir = "/workspace/formatted_training/img"
unet_lr = 1e-5
wandb_run_name = "mi_modelo_lora_v1"
xformers = true