File size: 1,435 Bytes
3b8d50d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
---
# Z-Image LoRA Training Configuration Example
# Bracketed values like [person_to_train] are placeholders to be replaced
# before use.

# Model configuration
model:
  name_or_path: "Tongyi-MAI/Z-Image"  # hub repo id, or path to a local model

# LoRA configuration
lora:
  prefix: "diffusion_model."  # NOTE(review): presumably prepended to module names — confirm against trainer
  rank: 32
  alpha: 32
  # Attention and feed-forward submodules to wrap with LoRA adapters
  target_modules:
    - "to_q"
    - "to_k"
    - "to_v"
    - "to_out.0"
    - "feed_forward.w1"
    - "feed_forward.w2"
    - "feed_forward.w3"
    - "adaLN_modulation.0"

# Dataset configuration
dataset:
  path: "[dataset_location]/[person_to_train]"
  trigger: "[instance_token] [class_token]"
  default_caption: "photo of a [class_token]"  # presumably used when an image has no caption — verify
  repeats: 1
  resolution: 512
  center_crop: true
  random_flip: false
  num_workers: 0  # 0 = dataloading in the main process

# Training configuration
train:
  batch_size: 1
  gradient_accumulation_steps: 1
  num_epochs: 100
  optimizer: "adamw8bit"
  # Written as 1.0e-4 (not 1e-4): exponent-only notation without a decimal
  # point is resolved as a *string* by YAML 1.1 loaders such as PyYAML.
  learning_rate: 1.0e-4
  adam_beta1: 0.9
  adam_beta2: 0.999
  adam_epsilon: 1.0e-8  # same spelling rule as learning_rate for portable float parsing
  weight_decay: 0.01
  weight_decay_exclude_lora: true
  timestep_weighting: "none"
  do_differential_guidance: true
  differential_guidance_scale: 3.0
  unconditional_prompt: ""
  dynamic_noise_offset: true
  noise_multiplier: 1.0
  random_noise_multiplier: 0.0
  random_noise_shift: 0.0
  latent_multiplier: 1.0
  noisy_latent_multiplier: 1.0
  max_grad_norm: 1.0
  save_every: 500  # NOTE(review): checkpoint interval, presumably in steps — confirm units

# Output configuration
output:
  path: "./output/[person_to_train]"

# Logging configuration
logging:
  level: "INFO"

# Training settings
dtype: "bfloat16"
mixed_precision: "bf16"
seed: 42