export PYTORCH_CUDA_ALLOC_CONF=""
export EXPERIMENT_NAME=llm_guard_3B_10k_v2
export WANDB_PROJECT='guard'
export CUDA_DEVICE_ORDER="PCI_BUS_ID"
export CUDA_VISIBLE_DEVICES=1,2
export VLLM_ATTENTION_BACKEND=FLASH_ATTN

PYTHONUNBUFFERED=1 NCCL_P2P_DISABLE=1 NCCL_IB_DISABLE=1 python3 -m verl.trainer.main_ppo \
    data.train_files=/home/mshahidul/readctrl/code/RL_model/verl/Search-R1/dataset/train.parquet \
    data.val_files=/home/mshahidul/readctrl/code/RL_model/verl/Search-R1/dataset/test.parquet \
    data.train_batch_size=64 \
    data.val_batch_size=64 \
    data.max_prompt_length=4096 \
    data.max_response_length=1024 \
    data.shuffle_train_dataloader=true \
    algorithm.adv_estimator=grpo \
    actor_rollout_ref.model.path=Qwen/Qwen3-4B-Instruct-2507 \
    actor_rollout_ref.model.enable_gradient_checkpointing=true \
    actor_rollout_ref.model.use_remove_padding=false \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    +actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.actor.fsdp_config.param_offload=true \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=true \
    actor_rollout_ref.rollout.log_prob_micro_batch_size=64 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.ref.log_prob_micro_batch_size=64 \
    actor_rollout_ref.ref.fsdp_config.param_offload=true \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    trainer.logger=['wandb'] \
    trainer.n_gpus_per_node=2 \
    trainer.nnodes=1 \
    trainer.save_freq=100 \
    trainer.test_freq=50 \
    trainer.project_name=$WANDB_PROJECT \
    trainer.experiment_name=$EXPERIMENT_NAME \
    trainer.total_epochs=15 \
    trainer.total_training_steps=1005 \
    trainer.default_local_dir=verl_checkpoints/$EXPERIMENT_NAME \
    do_search=false \
    max_turns=1 \
    2>&1 | tee "$EXPERIMENT_NAME.log"
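
# Batch-size arithmetic implied by the flags above: ppo_micro_batch_size_per_gpu=16
# on trainer.n_gpus_per_node=2 GPUs covers 16 * 2 = 32 samples per forward/backward
# pass, so each ppo_mini_batch_size of 64 is processed in 64 / 32 = 2
# gradient-accumulation steps.

# Optional pre-flight check, meant to run before the launch command above
# (a sketch assuming the same dataset paths and exports; DATA_DIR and the inline
# python3 probe are illustrative, not part of verl):
DATA_DIR=/home/mshahidul/readctrl/code/RL_model/verl/Search-R1/dataset
for f in "$DATA_DIR"/train.parquet "$DATA_DIR"/test.parquet; do
    [ -f "$f" ] || { echo "missing dataset file: $f" >&2; exit 1; }
done
# Confirm that the two GPUs selected via CUDA_VISIBLE_DEVICES are visible to PyTorch.
python3 -c 'import torch; n = torch.cuda.device_count(); assert n >= 2, f"expected 2 visible GPUs, found {n}"; print("visible GPUs:", n)'
# Failing here catches a missing parquet file or a mis-scoped CUDA_VISIBLE_DEVICES
# immediately, instead of minutes into Ray/vLLM startup.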