co_training: true
data_root_dir: data
hf_token: ./hf-token
image_aug: false
is_resume: true
local_rank: 0
pretrained_checkpoint: /ariesdv0/agopalkr/openvla-logs/prism-dinosiglip-224px+mx-rt_1_cotraining+robopoint_dino_freeze_st_4/checkpoints/step-390000-epoch-13-loss=0.0917.pt
resume_epoch: 13
resume_step: 390000
run_id: prism-dinosiglip-224px+mx-rt_1_cotraining+robopoint_dino_freeze_st_4
run_id_note: null
run_root_dir: logs
save_interval: 2500
seed: 7
trackers:
- jsonl
- wandb
vla:
  base_vlm: prism-dinosiglip-224px+7b
  co_training_data_mix: all+trace
  data_mix: rt_1
  enable_gradient_checkpointing: true
  enable_mixed_precision_training: true
  enable_tf32: true
  epochs: 50
  expected_world_size: 4
  freeze_llm_backbone: false
  freeze_vision_backbone: false
  global_batch_size: 128
  learning_rate: 8.882505490596547e-06
  lr_scheduler_type: linear-warmup+cosine-decay
  max_grad_norm: 1.0
  max_steps: null
  per_device_batch_size: 32
  reduce_in_full_precision: true
  shuffle_buffer_size: 256000
  train_strategy: fsdp-full-shard
  type: prism-dinosiglip-224px+mx-rt_1_cotraining+robopoint_dino_freeze_st_4
  unfreeze_last_llm_layer: false
  vla_id: prism-dinosiglip-224px+mx-rt_1_cotraining+robopoint_dino_freeze_st_4
  warmup_ratio: 0.0
  weight_decay: 0.0
wandb_entity: SU-Lab-openvla
wandb_project: openvla-cotraining