---
# tmp_ckpt/exp/final.yml
# Uploaded via huggingface_hub (commit b3f0a26, verified)
name: final
seed: 998244353
device_specific_seed: true
workder_specific_seed: true  # NOTE(review): likely a typo for worker_specific_seed — verify the consumer reads this exact key before renaming
data:
data_path: train_data/chenyuan/final.yml
image_size: 512
apply_chat_template: true
max_token_len: 4000
img_scale_num: 16
use_text_loss: false
interleave: true
use_input_image_for_vlm: true
model:
# pretrained_model_path: ~
pretrained_vae_model_name_or_path: /share_2/luoxin/modelscope/hub/models/FLUX.1-dev
pretrained_text_encoder_model_name_or_path: /share/shitao/models/Qwen2.5-VL-3B-Instruct
pretrained_diffusion_model_name_or_path: /share_2/luoxin/projects/Ominigenv2/experiments/0605_mix5/checkpoint-1000/pytorch_model_fsdp.bin
use_vae_input: false
use_all_condition: true
use_image_tokens: false
use_hw_tokens: false
use_only_text_token_from_mllm: true
use_only_prev_text_token_from_mllm: false
arch_type: Lumina2Transformer_separate_ref_process
arch_opt:
patch_size: 2
in_channels: 16
hidden_size: 2520
num_layers: 32
num_refiner_layers: 2
num_attention_heads: 21
num_kv_heads: 7
multiple_of: 256
norm_eps: !!float 1e-05
axes_dim_rope: [40, 40, 40]
axes_lens: [12000, 12000, 12000]
text_feat_dim: 2048
timestep_scale: !!float 1000
position_id_version: ominicontrol
separate_ref_image_patch_embed: true
separate_ref_image_refiner: true
use_fused_rms_norm: true
use_fused_swiglu: true
proj_feat_depth: 8
projector_dim: 2048
proj_feat_dim: 1024
transport:
snr_type: lognorm
do_shift: true
dynamic_time_shift: true
train:
# Dataloader
global_batch_size: 128
batch_size: 2
gradient_accumulation_steps: 1
num_train_epochs: 5
dataloader_num_workers: 6
# Optimizer
learning_rate: !!float 1e-6
mllm_lr: !!float 3e-7
mllm_projector_lr: !!float 1e-7
mllm_vision_lr: !!float 1e-7
mllm_image_token_lr: !!float 1e-4
scale_lr: false
lr_scheduler: timm_cosine
t_initial: 200000
lr_min: 0
cycle_decay: 0.5
warmup_t: 500
warmup_lr_init: 0
warmup_prefix: true
t_in_epochs: false
# resume_from_checkpoint:
use_8bit_adam: false
adam_beta1: 0.9
adam_beta2: 0.95
adam_weight_decay: !!float 0.01
adam_epsilon: !!float 1e-08
max_grad_norm: 1
gradient_checkpointing: true
set_grads_to_none: true
# Misc
allow_tf32: false
mixed_precision: 'bf16'
ema_decay: 0.0
repa_loss_weight: !!float 0.0
repa_encoder: dinov2_vitl14_reg
repa_batch_infer: true
text_loss_weight: !!float 1.0
train_diffusion: true
train_mllm: true
train_diffusion_context_refiner: true
val:
validation_steps: 200
train_visualization_steps: 200
logger:
log_with: [wandb, tensorboard]
# log_with: ~
checkpointing_steps: 1000
checkpoints_total_limit: ~
  cache_dir: ~
resume_from_checkpoint: latest
previous_stage_ckpt: ~