---
# ai-toolkit training job config (diffusion_trainer) for LoRA "anha".
# NOTE(review): structure reconstructed from a whitespace-mangled one-line file;
# nesting follows the standard ai-toolkit config schema — verify against the tool.
job: extension
config:
  name: anha
  process:
    - type: diffusion_trainer
      training_folder: /workspace/ai-toolkit/output
      sqlite_db_path: /workspace/ai-toolkit/aitk_db.db
      device: cuda
      trigger_word: null
      performance_log_every: 10
      network:
        type: lora
        linear: 16
        linear_alpha: 16
        conv: 16
        conv_alpha: 16
        lokr_full_rank: true
        # -1 presumably means "auto factor"; only used for LoKr — TODO confirm
        lokr_factor: -1
        network_kwargs:
          ignore_if_contains: []
      save:
        dtype: bf16
        save_every: 330
        max_step_saves_to_keep: 8
        save_format: diffusers
        push_to_hub: false
      datasets:
        - folder_path: /workspace/ai-toolkit/datasets/anha
          mask_path: null
          mask_min_value: 0.1
          default_caption: ''
          caption_ext: txt
          caption_dropout_rate: 0.05
          cache_latents_to_disk: false
          is_reg: false
          network_weight: 1
          # multi-resolution bucketing
          resolution:
            - 512
            - 768
            - 1024
          controls: []
          shrink_video_to_frames: true
          num_frames: 1
          do_i2v: true
          flip_x: false
          flip_y: false
      train:
        batch_size: 1
        bypass_guidance_embedding: false
        steps: 3300
        gradient_accumulation: 1
        train_unet: true
        train_text_encoder: false
        gradient_checkpointing: true
        noise_scheduler: flowmatch
        optimizer: adamw8bit
        timestep_type: sigmoid
        content_or_style: balanced
        optimizer_params:
          weight_decay: 0.0001
        unload_text_encoder: false
        cache_text_embeddings: false
        lr: 0.0001
        ema_config:
          use_ema: false
          ema_decay: 0.99
        skip_first_sample: false
        force_first_sample: false
        disable_sampling: false
        dtype: bf16
        diff_output_preservation: false
        diff_output_preservation_multiplier: 1
        diff_output_preservation_class: person
        switch_boundary_every: 1
        loss_type: mse
      model:
        name_or_path: ai-toolkit/Wan2.2-T2V-A14B-Diffusers-bf16
        quantize: true
        qtype: qfloat8
        quantize_te: true
        qtype_te: qfloat8
        # quoted: value contains a colon and must stay a single string
        arch: 'wan22_14b:t2v'
        low_vram: true
        model_kwargs:
          # Wan2.2 is a two-expert MoE; this config trains only the low-noise expert
          train_high_noise: false
          train_low_noise: true
        layer_offloading: false
        layer_offloading_text_encoder_percent: 1
        layer_offloading_transformer_percent: 1
      sample:
        sampler: flowmatch
        sample_every: 250
        width: 1024
        height: 1024
        samples: []
        neg: ''
        seed: 42
        walk_seed: true
        guidance_scale: 4
        sample_steps: 25
        num_frames: 1
        fps: 16
meta:
  name: anha
  # quoted so it stays the string "1.0", not the float 1.0
  version: '1.0'