# LoCO_depth / config.yaml
# Uploaded by anhth via huggingface_hub (commit 7a7110b, verified)
---
dtype: bfloat16
flux_path: black-forest-labs/FLUX.1-dev
model:
  independent_condition: false
train:
  accumulate_grad_batches: 1
  condition_type: depth
  dataloader_workers: 5
  dataset:
    cache_name: data_512_2M
    condition_size:
      - 512
      - 512
    drop_image_prob: 0.1
    drop_text_prob: 0.1
    target_size:
      - 512
      - 512
    type: img
    urls:
      - https://huggingface.co/datasets/jackyhate/text-to-image-2M/resolve/main/data_512_2M/data_000045.tar
      - https://huggingface.co/datasets/jackyhate/text-to-image-2M/resolve/main/data_512_2M/data_000046.tar
  gradient_checkpointing: false
  max_steps: -1
  optimizer:
    params:
      # lr of 1 is the recommended setting for Prodigy — it adapts the
      # effective step size internally, so this is not a typo.
      lr: 1
      safeguard_warmup: true
      use_bias_correction: true
      weight_decay: 0.01
    type: Prodigy
  rotation_adapter_config:
    num_rotations: 8
    r: 1
    # Single-quoted so YAML treats the regex as one literal string
    # (leading '(' and backslashes would otherwise be fragile as a plain scalar).
    target_modules: '(.*x_embedder|.*(?<!single_)transformer_blocks\.[0-9]+\.norm1\.linear|.*(?<!single_)transformer_blocks\.[0-9]+\.attn\.to_k|.*(?<!single_)transformer_blocks\.[0-9]+\.attn\.to_q|.*(?<!single_)transformer_blocks\.[0-9]+\.attn\.to_v|.*(?<!single_)transformer_blocks\.[0-9]+\.attn\.to_out\.0|.*(?<!single_)transformer_blocks\.[0-9]+\.ff\.net\.2|.*single_transformer_blocks\.[0-9]+\.norm\.linear|.*single_transformer_blocks\.[0-9]+\.proj_mlp|.*single_transformer_blocks\.[0-9]+\.proj_out|.*single_transformer_blocks\.[0-9]+\.attn.to_k|.*single_transformer_blocks\.[0-9]+\.attn.to_q|.*single_transformer_blocks\.[0-9]+\.attn.to_v|.*single_transformer_blocks\.[0-9]+\.attn.to_out)'
  sample_interval: 100
  save_interval: 1000
  save_path: runs
  wandb:
    project: OminiControlRotation