waveydaveygravy committed
Commit f2a2e80 · 1 Parent(s): 7eef282

Create README.md

Files changed (1):
  1. README.md +63 -0
README.md ADDED
@@ -0,0 +1,63 @@
+ The trained instance token is: fBFt
+ Trained on 8 instance images.
+
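+ # Colab form cell: adjust the parameters below, then run the cell to launch
+ # LoRA DreamBooth training.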
+ STEPS = 1600 #@param {type:"slider", min:0, max:10000, step:10}
+ BATCH_SIZE = 6 #@param {type:"slider", min:0, max:128, step:1}
+ FP_16 = True #@param {type:"boolean"}
+
+ #@markdown ----
+ #@markdown UNET PARAMS
+ LEARNING_RATE = 3e-4 #@param {type:"number"}
+
+ #@markdown ----
+ #@markdown TEXT ENCODER PARAMS
+ TRAIN_TEXT_ENCODER = True #@param {type:"boolean"}
+ LEARNING_RATE_TEXT_ENCODER = 5e-5 #@param {type:"number"}
+
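+ # The learning rates are divided by BATCH_SIZE; with the defaults above that
+ # gives 3e-4 / 6 = 5e-5 for the UNet and 5e-5 / 6 ≈ 8.3e-6 for the text
+ # encoder. Note that BATCH_SIZE only rescales the learning rates here: the
+ # launch command below always passes --train_batch_size=1.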
+ NEW_LEARNING_RATE = LEARNING_RATE / BATCH_SIZE
+ NEW_LEARNING_RATE_TEXT_ENCODER = LEARNING_RATE_TEXT_ENCODER / BATCH_SIZE
+
+ # Map the FP_16 checkbox to the value accelerate expects for --mixed_precision.
+ if FP_16:
+     fp_16_arg = "fp16"
+ else:
+     fp_16_arg = "no"
+
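+ # Build the accelerate launch command. The two branches differ only in that
+ # the first adds --train_text_encoder and uses a cosine LR schedule, while
+ # the second keeps a constant schedule.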
+ if TRAIN_TEXT_ENCODER:
+     command = (f'accelerate launch lora/training_scripts/train_lora_dreambooth.py '
+                f'--pretrained_model_name_or_path="{PRETRAINED_MODEL}" '
+                f'--instance_data_dir="{INSTANCE_DIR}" '
+                f'--output_dir="{OUTPUT_DIR}" '
+                f'--instance_prompt="{PROMPT}" '
+                f'--resolution=512 '
+                f'--use_8bit_adam '
+                f'--mixed_precision="{fp_16_arg}" '
+                f'--train_batch_size=1 '
+                f'--gradient_accumulation_steps=1 '
+                f'--learning_rate={NEW_LEARNING_RATE} '
+                f'--lr_scheduler="cosine" '
+                f'--lr_warmup_steps=0 '
+                f'--max_train_steps={STEPS} '
+                f'--train_text_encoder '
+                f'--lora_rank=16 '
+                f'--learning_rate_text={NEW_LEARNING_RATE_TEXT_ENCODER}')
+ else:
+     command = (f'accelerate launch lora/training_scripts/train_lora_dreambooth.py '
+                f'--pretrained_model_name_or_path="{PRETRAINED_MODEL}" '
+                f'--instance_data_dir="{INSTANCE_DIR}" '
+                f'--output_dir="{OUTPUT_DIR}" '
+                f'--instance_prompt="{PROMPT}" '
+                f'--resolution=512 '
+                f'--use_8bit_adam '
+                f'--mixed_precision="{fp_16_arg}" '
+                f'--train_batch_size=1 '
+                f'--gradient_accumulation_steps=1 '
+                f'--learning_rate={NEW_LEARNING_RATE} '
+                f'--lr_scheduler="constant" '
+                f'--lr_warmup_steps=0 '
+                f'--lora_rank=16 '
+                f'--max_train_steps={STEPS} '
+                f'--learning_rate_text={NEW_LEARNING_RATE_TEXT_ENCODER}')
62
+ !rm -rf $INSTANCE_DIR/.ipynb_checkpoints
63
+ !{command}
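
After training, the fBFt token can be used at inference time. Below is a minimal sketch of loading the resulting weights with cloneofsimo's lora_diffusion helpers (the package this training script comes from) and sampling with the fBFt token. The output filename, the 0.8 LoRA scale, and the prompt are assumptions, not part of the original notebook; PRETRAINED_MODEL and OUTPUT_DIR are the same variables the training cell uses.

```python
# Hedged sketch: inject the trained LoRA into a Stable Diffusion pipeline
# and generate an image with the fBFt instance token.
import torch
from diffusers import StableDiffusionPipeline
from lora_diffusion import patch_pipe, tune_lora_scale

pipe = StableDiffusionPipeline.from_pretrained(
    PRETRAINED_MODEL, torch_dtype=torch.float16
).to("cuda")

# Assumed output path; check OUTPUT_DIR for the actual filename the script wrote.
patch_pipe(
    pipe,
    f"{OUTPUT_DIR}/lora_weight.safetensors",
    patch_text=True,   # the text encoder was trained (TRAIN_TEXT_ENCODER = True)
    patch_ti=False,    # no textual-inversion embedding in this run
    patch_unet=True,
)
tune_lora_scale(pipe.unet, 0.8)
tune_lora_scale(pipe.text_encoder, 0.8)

image = pipe("a photo of fBFt", num_inference_steps=30).images[0]
image.save("sample.png")
```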