naumnaum committed on
Commit
45183b4
·
verified ·
1 Parent(s): adab037

Upload 13rep-16d-16a-12ep-0001lr-2bs-adamw8bit/config.json with huggingface_hub

Browse files
13rep-16d-16a-12ep-0001lr-2bs-adamw8bit/config.json ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "pretrained_model_name_or_path": "/home/ubuntu/test/models/flux1-dev.safetensors",
3
+ "ae": "/home/ubuntu/test/models/ae.safetensors",
4
+ "t5xxl": "/home/ubuntu/test/models/t5xxl_fp16.safetensors",
5
+ "clip_l": "/home/ubuntu/test/models/clip_l.safetensors",
6
+ "network_dim": 16,
7
+ "network_alpha": 16,
8
+ "optimizer_type": "Adamw8bit",
9
+ "unet_lr": 0.0001,
10
+ "save_every_n_epochs": 1,
11
+ "save_precision": "fp16",
12
+ "resolution": "512,512",
13
+ "train_batch_size": 2,
14
+ "epoch": 12,
15
+ "max_train_steps": 1250,
16
+ "apply_t5_attn_mask": false,
17
+ "bucket_no_upscale": true,
18
+ "bucket_reso_steps": 64,
19
+ "cache_latents": true,
20
+ "cache_latents_to_disk": true,
21
+ "cache_text_encoder_outputs": true,
22
+ "cache_text_encoder_outputs_to_disk": true,
23
+ "caption_extension": ".txt",
24
+ "clip_skip": 1,
25
+ "discrete_flow_shift": 3.1582,
26
+ "dynamo_backend": "no",
27
+ "enable_bucket": true,
28
+ "full_bf16": true,
29
+ "gradient_accumulation_steps": 1,
30
+ "gradient_checkpointing": true,
31
+ "guidance_scale": 1.0,
32
+ "highvram": true,
33
+ "huber_c": 0.1,
34
+ "huber_schedule": "snr",
35
+ "huggingface_repo_type": "model",
36
+ "huggingface_repo_visibility": "public",
37
+ "save_state_to_huggingface": true,
38
+ "loss_type": "l2",
39
+ "lr_scheduler": "cosine_with_restarts",
40
+ "lr_scheduler_args": [],
41
+ "lr_scheduler_num_cycles": 3,
42
+ "lr_scheduler_power": 1,
43
+ "min_bucket_reso": 256,
44
+ "max_bucket_reso": 2048,
45
+ "max_data_loader_n_workers": 0,
46
+ "max_grad_norm": 1,
47
+ "max_timestep": 1000,
48
+ "metadata_author": "unreal app",
49
+ "metadata_description": "IDK",
50
+ "metadata_license": "IDK",
51
+ "metadata_tags": "IDK",
52
+ "metadata_title": "IDK",
53
+ "min_snr_gamma": 5,
54
+ "mixed_precision": "bf16",
55
+ "model_prediction_type": "raw",
56
+ "network_args": [
57
+ "train_double_block_indices=all",
58
+ "train_single_block_indices=all"
59
+ ],
60
+ "network_module": "networks.lora_flux",
61
+ "network_train_unet_only": true,
62
+ "noise_offset": 0.1,
63
+ "noise_offset_type": "Original",
64
+ "optimizer_args": [],
65
+ "prior_loss_weight": 1,
66
+ "sample_sampler": "euler",
67
+ "save_model_as": "safetensors",
68
+ "sdpa": true,
69
+ "seed": 42,
70
+ "t5xxl_max_token_length": 512,
71
+ "text_encoder_lr": [],
72
+ "timestep_sampling": "sigmoid",
73
+ "output_dir": "/home/ubuntu/test/cache/rita-lambda-test/model/13rep-16d-16a-12ep-0001lr-2bs-adamw8bit",
74
+ "logging_dir": "/home/ubuntu/test/cache/rita-lambda-test/log/13rep-16d-16a-12ep-0001lr-2bs-adamw8bit",
75
+ "train_data_dir": "/home/ubuntu/test/cache/rita-lambda-test/img/13rep-16d-16a-12ep-0001lr-2bs-adamw8bit",
76
+ "huggingface_repo_id": "unrealme/rita-lambda-test",
77
+ "huggingface_path_in_repo": "13rep-16d-16a-12ep-0001lr-2bs-adamw8bit",
78
+ "output_name": "lora"
79
+ }