# trained_lora / config.yaml
# Uploaded by bb1070 with huggingface_hub
# Commit: 63be438 (verified)
job: extension
config:
  name: trained_lora
  process:
    - type: sd_trainer
      training_folder: /home/bhara/ai-toolkit/output
      device: cuda:0
      trigger_word: UNST
      network:
        type: lora
        linear: 16
        linear_alpha: 16
        network_kwargs:
          only_if_contains:
            - transformer.single_transformer_blocks.7.proj_out
            - transformer.single_transformer_blocks.12.proj_out
            - transformer.single_transformer_blocks.16.proj_out
            - transformer.single_transformer_blocks.20.proj_out
      save:
        dtype: float16
        save_every: 100
        max_step_saves_to_keep: 4
      datasets:
        - folder_path: /home/bhara/ai-toolkit/dataset/images_and_captions
          caption_ext: txt
          caption_dropout_rate: 0.05
          cache_latents_to_disk: false
          resolution:
            - 512
            - 768
            - 1024
      train:
        batch_size: 1
        steps: 10
        lr: 0.0004
        gradient_checkpointing: true
        optimizer: adamw8bit
        disable_sampling: true
        dtype: bf16
      model:
        name_or_path: black-forest-labs/FLUX.1-dev
        is_flux: true
        quantize: false
      sample:
        sample_every: 10000
        width: 1024
        height: 1024
        prompts: []
        seed: 42
        sample_steps: 28