Lachter0808 committed on
Commit
a901055
·
verified ·
1 Parent(s): 0f004d8

Upload 2 files

Browse files
Files changed (2) hide show
  1. adapter_config.json +13 -0
  2. config.json +55 -0
adapter_config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "peft_type": "LORA",
  "r": 16,
  "lora_alpha": 16,
  "bias": "none",
  "target_modules": [
    "attn.to_q",
    "attn.to_k",
    "attn.to_v",
    "attn.to_out.0"
  ],
  "task_type": "DIFFUSION"
}
config.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "project": "fluxdev-lora",
  "base_model": "black-forest-labs/FLUX.1-dev",
  "weights_hint": "flux1-dev-fp8.safetensors",
  "task": "dreambooth-lora",
  "resolution": 768,
  "rank": 16,
  "lora_alpha": 16,
  "lora_layers": "attn.to_k,attn.to_q,attn.to_v,attn.to_out.0",
  "train_transformer_frac": 1.0,
  "train_text_encoder_ti": true,
  "enable_t5_ti": false,
  "train_text_encoder_ti_frac": 0.25,
  "optimizer": "prodigy",
  "learning_rate": 1.0,
  "lr_scheduler": "constant",
  "max_train_steps": 1000,
  "train_batch_size": 1,
  "gradient_accumulation_steps": 1,
  "mixed_precision": "bf16",
  "guidance_scale": 1,
  "seed": 42,

  "dataset": {
    "type": "image-caption",
    "path_or_repo": "REPLACE_WITH_DATASET_OR_PATH",
    "image_column": "image",
    "caption_column": "caption",
    "instance_prompt": "TOK",
    "token_abstraction": "TOK",
    "repeats": 1,
    "shuffle": true
  },

  "logging": {
    "wandb": false,
    "report_to": "tensorboard",
    "logging_steps": 25
  },

  "checkpointing": {
    "output_dir": "outputs/fluxdev-lora",
    "save_steps": 200,
    "push_to_hub": true,
    "hub_model_id": "Lachter0808/fluxdev-lora",
    "save_safetensors": true
  },

  "hardware": {
    "gradient_checkpointing": true,
    "use_8bit_adam": false
  },

  "notes": "Config afgestemd op diffusers Advanced Flux Dreambooth LoRA; attention-only LoRA, pivotal tuning op CLIP. Gebruik het base_model voor training; fp8-bestand is een referentie voor inferentie."
}