#!/usr/bin/env bash
# Launch the lora_phyx_math_f SFT run on 8 GPUs via torchrun
# (grad_accum=8, effective batch=64), appending all output to a
# timestamped log under /workspace/rl4phyx/logs.
set -euo pipefail

cd /workspace/rl4phyx/RL4Phyx/SFT || exit 1

export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
export PYTHONUNBUFFERED=1   # unbuffered Python stdout so the log streams live

readonly LOG_DIR=/workspace/rl4phyx/logs
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
readonly TIMESTAMP
readonly LOG="${LOG_DIR}/train_lora_phyx_math_f_8gpu_${TIMESTAMP}.log"

# The log directory may not exist on a fresh workspace; the first redirect
# below would otherwise fail.
mkdir -p -- "$LOG_DIR"

echo "Starting lora_phyx_math_f (8 GPUs, grad_accum=8, batch=64) at $(date)" > "$LOG"

# Capture torchrun's exit status instead of letting set -e kill the script,
# so we can write an explicit success/failure marker to the log.
rc=0
torchrun --nproc_per_node=8 --master_port=29500 \
  train_sft_phyx_math_lora_freeze.py >> "$LOG" 2>&1 || rc=$?

if (( rc == 0 )); then
  echo "TRAINING_COMPLETE at $(date)" >> "$LOG"
else
  # Distinct sentinel: previously TRAINING_COMPLETE was logged even on failure,
  # misleading anything that greps the log for completion.
  echo "TRAINING_FAILED (exit ${rc}) at $(date)" >> "$LOG"
fi

exit "$rc"