---
# Resume configuration for a pi-0 (PaliGemma 224px, 3B) VLA co-training run on
# the Bridge data mix, continuing from checkpoint step 285000 (epoch 17).
# NOTE(review): nesting reconstructed from a flattened copy of this file —
# top-level keys and the `vla:` sub-mapping follow the alphabetical key order
# the original dump used; verify against the consuming config loader.

co_training: true              # train on the co-training mix alongside the robot data
data_root_dir: data
hf_token: ./hf-token           # path to a HuggingFace token file (secret kept out of VCS)
image_aug: false
is_resume: true                # resume from `pretrained_checkpoint` below
local_rank: 0
pretrained_checkpoint: /ariesdv0/agopalkr/openvla-logs/pi-0-224px-bridge_cotraining_st_8/checkpoints/step-285000-epoch-17-loss=0.0839.pt
resume_epoch: 17               # matches the epoch encoded in the checkpoint filename
resume_step: 285000            # matches the step encoded in the checkpoint filename
run_id: pi-0-224px-bridge_cotraining_st_8
run_id_note: null
run_root_dir: logs
save_interval: 2500            # checkpoint every N steps
seed: 7
trackers:
  - jsonl
  - wandb
vla:
  base_vlm: paligemma-224px+3b
  co_training_data_mix: all+trace
  data_mix: bridge
  enable_gradient_checkpointing: true
  enable_mixed_precision_training: true
  enable_tf32: true
  epochs: 50
  expected_world_size: 8       # number of ranks the FSDP strategy expects
  freeze_llm_backbone: false
  freeze_vision_backbone: false
  global_batch_size: 128       # per_device_batch_size (16) x expected_world_size (8)
  learning_rate: 2.0e-05
  lr_scheduler_type: linear-warmup+cosine-decay
  max_grad_norm: 1.0
  max_steps: null              # null -> training length is governed by `epochs`
  per_device_batch_size: 16
  reduce_in_full_precision: true
  shuffle_buffer_size: 256000
  train_strategy: fsdp-full-shard
  type: pi-0-224px-bridge_cotraining_st_8
  unfreeze_last_llm_layer: false
  vla_id: pi-0-224px-bridge_cotraining_st_8
  warmup_ratio: 0.0
  weight_decay: 0.0
wandb_entity: SU-Lab-openvla
wandb_project: pi-0-cotraining