---
# ai-toolkit LoRA training config (qwen_image architecture).
# Reconstructed block-style layout: config.process is a list of trainer
# entries; job and meta are top-level siblings of config.
config:
  name: last
  process:
    - datasets:
        - cache_latents_to_disk: true
          caption_ext: txt
          # Quoted: the path contains a space ("1_lora style").
          folder_path: '/cache/images/61f0135e-5d7a-47b2-95f8-5e22b4eae61b/1_lora style'
          is_reg: false
          # Multi-resolution bucketing sizes.
          resolution:
            - 512
            - 768
            - 1024
      device: cuda
      model:
        arch: qwen_image
        low_vram: true
        name_or_path: /cache/models/gradients-io-tournaments--Qwen-Image-Jib-Mix
        # float8 quantization for both the transformer and the text encoder.
        qtype: float8
        qtype_te: float8
        quantize: true
        quantize_te: true
      network:
        conv_alpha: 32
        conv_rank: 64
        linear: 128
        linear_alpha: 64
        type: lora
      save:
        dtype: bf16
        max_step_saves_to_keep: 4
        save_every: 250
        save_format: diffusers
      train:
        batch_size: 1
        cache_text_embeddings: false
        caption_dropout_rate: 0.15
        cfg_scale: 3.5
        do_cfg: true
        dtype: bf16
        ema_config:
          ema_decay: 0.999
          use_ema: true
        gradient_checkpointing: true
        # Huber loss with delta (huber_c) 0.1.
        huber_c: 0.1
        loss_type: huber
        lr: 0.00012
        model_name: Qwen2-VL-7B-Instruct-Jib
        noise_scheduler: flowmatch
        optimizer: adamw8bit
        optimizer_params:
          weight_decay: 0.0002
        save_every_n_epochs: 60
        steps: 3000
        timestep_type: weighted
        train_text_encoder: false
        train_unet: true
        training_folder: /app/checkpoints/61f0135e-5d7a-47b2-95f8-5e22b4eae61b/ipunktest-1
      type: diffusion_trainer
job: extension
meta:
  name: qwen_image_lora_jordansky
  # Quoted so the parser keeps the string '1.6' instead of float 1.6.
  version: '1.6'