---
# LoRA training job configuration (diffusion_trainer process for a qwen_image model).
# NOTE(review): original file was collapsed onto a single line and was not parseable
# YAML; structure below is reconstructed from the ai-toolkit config schema — verify
# nesting against the consuming trainer before relying on it.
config:
  name: last
  process:
    - datasets:
        - cache_latents_to_disk: true
          caption_ext: txt
          # Path contains a space ("1_lora style"), so it must be quoted.
          folder_path: '/cache/images/61f0135e-5d7a-47b2-95f8-5e22b4eae61b/1_lora style'
          is_reg: false
          # Multi-resolution bucketing sizes.
          resolution:
            - 512
            - 768
            - 1024
      device: cuda
      model:
        arch: qwen_image
        low_vram: true
        name_or_path: /cache/models/gradients-io-tournaments--Qwen-Image-Jib-Mix
        # float8 quantization for both the model and the text encoder.
        qtype: float8
        qtype_te: float8
        quantize: true
        quantize_te: true
      network:
        linear: 32
        linear_alpha: 32
        type: lora
      save:
        dtype: bf16
        max_step_saves_to_keep: 4
        save_every: 250
        save_format: diffusers
      train:
        batch_size: 1
        cache_text_embeddings: false
        caption_dropout_rate: 0.05
        cfg_scale: 3.5
        do_cfg: true
        dtype: bf16
        ema_config:
          ema_decay: 0.999
          use_ema: true
        gradient_checkpointing: true
        huber_c: 0.1
        loss_type: huber
        lr: 0.00012
        model_name: Qwen2-VL-7B-Instruct-Jib
        noise_scheduler: flowmatch
        optimizer: adamw8bit
        save_every_n_epochs: 60
        steps: 1600
        timestep_type: weighted
        train_text_encoder: false
        train_unet: true
        training_folder: /app/checkpoints/61f0135e-5d7a-47b2-95f8-5e22b4eae61b/ipunktest-10
      type: diffusion_trainer
job: extension
meta:
  name: qwen_image_lora_jordansky
  # Quoted so it stays a string ("1.6" would otherwise parse as the float 1.6).
  version: '1.6'