Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +3 -0
- deep_search/DeepResearcher/examples/generation/run_deepseek_v2_lite_math.sh +16 -0
- deep_search/DeepResearcher/examples/grpo_trainer/run_deepseek7b_llm_seq_balance.sh +36 -0
- deep_search/DeepResearcher/examples/grpo_trainer/run_qwen2-7b.sh +39 -0
- deep_search/DeepResearcher/examples/grpo_trainer/run_qwen2_5_vl-7b.sh +43 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek7b_llm.sh +39 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek7b_llm_sp2.sh +41 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek_full_hh_rlhf.sh +40 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek_math_gsm8k_megatron.sh +39 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek_megatron.sh +43 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_gemma.sh +38 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b.sh +47 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b_math_gsm8k_megatron.sh +41 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b_rm.sh +70 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance.sh +58 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b_seq_balance.sh +49 -0
- deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2.5-32b.sh +48 -0
- deep_search/DeepResearcher/examples/ppo_trainer/verl_getting_started.ipynb +0 -0
- deep_search/DeepResearcher/examples/slurm/ray_on_slurm.slurm +99 -0
- deep_search/DeepResearcher/scripts/format.sh +3 -0
- deep_search/DeepResearcher/scripts/model_merger.py +172 -0
- deep_search/DeepResearcher/signal/data.json +18 -0
- deep_search/DeepResearcher/signal/data_read.json +111 -0
- deep_search/DeepResearcher/signal/data_search.json +180 -0
- deep_search/DeepResearcher/signal/signal.json +3 -0
- deep_search/DeepResearcher/tests/__init__.py +13 -0
- deep_search/DeepResearcher/tests/e2e/__init__.py +13 -0
- deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/data/create_dataset.py +46 -0
- deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/model/config.json +29 -0
- deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/model/create_model_tokenizer.py +61 -0
- deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/model/generation_config.json +6 -0
- deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/model/tokenizer_config.json +18 -0
- deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/rl/README.md +37 -0
- deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/rl/main_trainer.py +147 -0
- deep_search/DeepResearcher/tests/e2e/check_results.py +52 -0
- deep_search/DeepResearcher/tests/e2e/envs/__init__.py +17 -0
- deep_search/DeepResearcher/tests/e2e/envs/digit_completion/__init__.py +22 -0
- deep_search/DeepResearcher/tests/e2e/envs/digit_completion/task.py +177 -0
- deep_search/DeepResearcher/tests/e2e/envs/digit_completion/tokenizer.py +158 -0
- deep_search/DeepResearcher/tests/e2e/run_deepseek_megatron.sh +40 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen2vl_geo3k_function_rm.sh +41 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_function_rm.sh +40 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_function_rm_grpo.sh +33 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_function_rm_no_rmpad.sh +40 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_function_rm_remax.sh +33 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm.sh +48 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm_liger_kernel.sh +49 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm_no_rmpad.sh +48 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm_seq_balance.sh +51 -0
- deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm_ulysses.sh +53 -0
.gitattributes
CHANGED
|
@@ -2230,3 +2230,6 @@ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_
|
|
| 2230 |
deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_9.json filter=lfs diff=lfs merge=lfs -text
|
| 2231 |
deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_8.json filter=lfs diff=lfs merge=lfs -text
|
| 2232 |
deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_5.json filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 2230 |
deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_9.json filter=lfs diff=lfs merge=lfs -text
|
| 2231 |
deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_8.json filter=lfs diff=lfs merge=lfs -text
|
| 2232 |
deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_5.json filter=lfs diff=lfs merge=lfs -text
|
| 2233 |
+
deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_10.json filter=lfs diff=lfs merge=lfs -text
|
| 2234 |
+
deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_11.json filter=lfs diff=lfs merge=lfs -text
|
| 2235 |
+
deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_6.json filter=lfs diff=lfs merge=lfs -text
|
deep_search/DeepResearcher/examples/generation/run_deepseek_v2_lite_math.sh
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
python3 -m verl.trainer.main_generation \
|
| 2 |
+
trainer.nnodes=1 \
|
| 3 |
+
trainer.n_gpus_per_node=8 \
|
| 4 |
+
data.path=~/data/rlhf/gsm8k/test.parquet \
|
| 5 |
+
data.prompt_key=prompt \
|
| 6 |
+
data.n_samples=1 \
|
| 7 |
+
data.output_path=~/data/rlhf/math/deepseek_v2_lite_gen_test.parquet \
|
| 8 |
+
model.path=deepseek-ai/deepseek-llm-7b-chat \
|
| 9 |
+
+model.trust_remote_code=True \
|
| 10 |
+
rollout.temperature=1.0 \
|
| 11 |
+
rollout.top_k=50 \
|
| 12 |
+
rollout.top_p=0.7 \
|
| 13 |
+
rollout.prompt_length=2048 \
|
| 14 |
+
rollout.response_length=1024 \
|
| 15 |
+
rollout.tensor_model_parallel_size=2 \
|
| 16 |
+
rollout.gpu_memory_utilization=0.8
|
deep_search/DeepResearcher/examples/grpo_trainer/run_deepseek7b_llm_seq_balance.sh
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
python3 -m verl.trainer.main_ppo \
|
| 4 |
+
algorithm.adv_estimator=grpo \
|
| 5 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 6 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 7 |
+
data.train_batch_size=1024 \
|
| 8 |
+
data.max_prompt_length=512 \
|
| 9 |
+
data.max_response_length=512 \
|
| 10 |
+
actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
|
| 11 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 12 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 13 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 14 |
+
actor_rollout_ref.actor.use_dynamic_bsz=True \
|
| 15 |
+
actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
|
| 16 |
+
actor_rollout_ref.actor.use_kl_loss=True \
|
| 17 |
+
actor_rollout_ref.actor.kl_loss_coef=0.001 \
|
| 18 |
+
actor_rollout_ref.actor.kl_loss_type=low_var_kl \
|
| 19 |
+
actor_rollout_ref.model.enable_gradient_checkpointing=True \
|
| 20 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 21 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 22 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 23 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 24 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
|
| 25 |
+
actor_rollout_ref.rollout.n=5 \
|
| 26 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 27 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 28 |
+
trainer.critic_warmup=0 \
|
| 29 |
+
trainer.logger=['console','wandb'] \
|
| 30 |
+
trainer.project_name='verl_grpo_example_gsm8k' \
|
| 31 |
+
trainer.experiment_name='deepseek_llm_7b_function_rm_seq_packing' \
|
| 32 |
+
trainer.n_gpus_per_node=8 \
|
| 33 |
+
trainer.nnodes=1 \
|
| 34 |
+
trainer.save_freq=-1 \
|
| 35 |
+
trainer.test_freq=5 \
|
| 36 |
+
trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/grpo_trainer/run_qwen2-7b.sh
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
algorithm.adv_estimator=grpo \
|
| 7 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 8 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 9 |
+
data.train_batch_size=1024 \
|
| 10 |
+
data.max_prompt_length=512 \
|
| 11 |
+
data.max_response_length=1024 \
|
| 12 |
+
actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \
|
| 13 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 14 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 15 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 16 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=80 \
|
| 17 |
+
actor_rollout_ref.actor.use_kl_loss=True \
|
| 18 |
+
actor_rollout_ref.actor.kl_loss_coef=0.001 \
|
| 19 |
+
actor_rollout_ref.actor.kl_loss_type=low_var_kl \
|
| 20 |
+
actor_rollout_ref.model.enable_gradient_checkpointing=True \
|
| 21 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 22 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 23 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=160 \
|
| 24 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 25 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 26 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
|
| 27 |
+
actor_rollout_ref.rollout.n=5 \
|
| 28 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=160 \
|
| 29 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 30 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 31 |
+
trainer.critic_warmup=0 \
|
| 32 |
+
trainer.logger=['console','wandb'] \
|
| 33 |
+
trainer.project_name='verl_grpo_example_gsm8k' \
|
| 34 |
+
trainer.experiment_name='qwen2_7b_function_rm' \
|
| 35 |
+
trainer.n_gpus_per_node=8 \
|
| 36 |
+
trainer.nnodes=1 \
|
| 37 |
+
trainer.save_freq=-1 \
|
| 38 |
+
trainer.test_freq=5 \
|
| 39 |
+
trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/grpo_trainer/run_qwen2_5_vl-7b.sh
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
algorithm.adv_estimator=grpo \
|
| 7 |
+
data.train_files=$HOME/data/geo3k/train.parquet \
|
| 8 |
+
data.val_files=$HOME/data/geo3k/test.parquet \
|
| 9 |
+
data.train_batch_size=512 \
|
| 10 |
+
data.max_prompt_length=1024 \
|
| 11 |
+
data.max_response_length=2048 \
|
| 12 |
+
data.image_key=images \
|
| 13 |
+
actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \
|
| 14 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 15 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 16 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=128 \
|
| 17 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \
|
| 18 |
+
actor_rollout_ref.actor.use_kl_loss=True \
|
| 19 |
+
actor_rollout_ref.actor.kl_loss_coef=0.01 \
|
| 20 |
+
actor_rollout_ref.actor.kl_loss_type=low_var_kl \
|
| 21 |
+
actor_rollout_ref.model.enable_gradient_checkpointing=True \
|
| 22 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 23 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 24 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \
|
| 25 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 26 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 27 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
|
| 28 |
+
actor_rollout_ref.rollout.enable_chunked_prefill=False \
|
| 29 |
+
actor_rollout_ref.rollout.enforce_eager=False \
|
| 30 |
+
actor_rollout_ref.rollout.free_cache_engine=False \
|
| 31 |
+
actor_rollout_ref.rollout.n=5 \
|
| 32 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \
|
| 33 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 34 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 35 |
+
trainer.critic_warmup=0 \
|
| 36 |
+
trainer.logger=['console','wandb'] \
|
| 37 |
+
trainer.project_name='verl_grpo_example_geo3k' \
|
| 38 |
+
trainer.experiment_name='qwen2_5_vl_7b_function_rm' \
|
| 39 |
+
trainer.n_gpus_per_node=8 \
|
| 40 |
+
trainer.nnodes=1 \
|
| 41 |
+
trainer.save_freq=-1 \
|
| 42 |
+
trainer.test_freq=5 \
|
| 43 |
+
trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek7b_llm.sh
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
python3 -m verl.trainer.main_ppo \
|
| 4 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 5 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 6 |
+
data.train_batch_size=1024 \
|
| 7 |
+
data.max_prompt_length=512 \
|
| 8 |
+
data.max_response_length=512 \
|
| 9 |
+
actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
|
| 10 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 11 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 12 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 13 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
|
| 14 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 15 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 16 |
+
actor_rollout_ref.model.enable_gradient_checkpointing=True \
|
| 17 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \
|
| 18 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
|
| 19 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 20 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 21 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \
|
| 22 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 23 |
+
critic.optim.lr=1e-5 \
|
| 24 |
+
critic.model.use_remove_padding=True \
|
| 25 |
+
critic.model.path=deepseek-ai/deepseek-llm-7b-chat \
|
| 26 |
+
critic.model.enable_gradient_checkpointing=True \
|
| 27 |
+
critic.ppo_micro_batch_size_per_gpu=32 \
|
| 28 |
+
critic.model.fsdp_config.param_offload=False \
|
| 29 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 30 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 31 |
+
trainer.critic_warmup=0 \
|
| 32 |
+
trainer.logger=['console','wandb'] \
|
| 33 |
+
trainer.project_name='verl_example_gsm8k' \
|
| 34 |
+
trainer.experiment_name='deepseek_llm_7b_function_rm' \
|
| 35 |
+
trainer.n_gpus_per_node=8 \
|
| 36 |
+
trainer.nnodes=1 \
|
| 37 |
+
trainer.save_freq=-1 \
|
| 38 |
+
trainer.test_freq=1 \
|
| 39 |
+
trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek7b_llm_sp2.sh
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
python3 -m verl.trainer.main_ppo \
|
| 4 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 5 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 6 |
+
data.train_batch_size=1024 \
|
| 7 |
+
data.max_prompt_length=512 \
|
| 8 |
+
data.max_response_length=512 \
|
| 9 |
+
actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
|
| 10 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 11 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 12 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 13 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \
|
| 14 |
+
actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \
|
| 15 |
+
actor_rollout_ref.model.enable_gradient_checkpointing=True \
|
| 16 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 17 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 18 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=64 \
|
| 19 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
|
| 20 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 21 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
|
| 22 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=64 \
|
| 23 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 24 |
+
critic.optim.lr=1e-5 \
|
| 25 |
+
critic.ulysses_sequence_parallel_size=2 \
|
| 26 |
+
critic.model.use_remove_padding=True \
|
| 27 |
+
critic.model.path=deepseek-ai/deepseek-llm-7b-chat \
|
| 28 |
+
critic.model.enable_gradient_checkpointing=True \
|
| 29 |
+
critic.ppo_micro_batch_size_per_gpu=64 \
|
| 30 |
+
critic.model.fsdp_config.param_offload=False \
|
| 31 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 32 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 33 |
+
trainer.critic_warmup=0 \
|
| 34 |
+
trainer.logger=['console','wandb'] \
|
| 35 |
+
trainer.project_name='verl_example_gsm8k' \
|
| 36 |
+
trainer.experiment_name='deepseek_llm_7b_function_rm_sp2' \
|
| 37 |
+
trainer.n_gpus_per_node=8 \
|
| 38 |
+
trainer.nnodes=1 \
|
| 39 |
+
trainer.save_freq=-1 \
|
| 40 |
+
trainer.test_freq=5 \
|
| 41 |
+
trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek_full_hh_rlhf.sh
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
train_files=$HOME/data/full_hh_rlhf/rl/train.parquet
|
| 4 |
+
test_files=$HOME/data/full_hh_rlhf/rl/train.parquet # no use
|
| 5 |
+
|
| 6 |
+
python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\
|
| 7 |
+
data.train_files="$train_files" \
|
| 8 |
+
data.val_files="$test_files" \
|
| 9 |
+
data.train_batch_size=512 \
|
| 10 |
+
data.max_prompt_length=128 \
|
| 11 |
+
data.max_response_length=128 \
|
| 12 |
+
actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
|
| 13 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 14 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=128 \
|
| 15 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 16 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
|
| 17 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
|
| 18 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 19 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 20 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
|
| 21 |
+
actor_rollout_ref.ref.param_offload=False \
|
| 22 |
+
critic.optim.lr=1e-5 \
|
| 23 |
+
critic.model.path=deepseek-ai/deepseek-llm-7b-chat \
|
| 24 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 25 |
+
critic.ppo_micro_batch_size_per_gpu=4 \
|
| 26 |
+
reward_model.enable=True \
|
| 27 |
+
reward_model.megatron.tensor_model_parallel_size=4 \
|
| 28 |
+
reward_model.model.path=deepseek-ai/deepseek-llm-7b-chat \
|
| 29 |
+
reward_model.micro_batch_size_per_gpu=4 \
|
| 30 |
+
reward_model.param_offload=False \
|
| 31 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 32 |
+
trainer.critic_warmup=0 \
|
| 33 |
+
trainer.logger=['console','wandb'] \
|
| 34 |
+
trainer.project_name='verl_megatron_full_hh_rlhf_examples' \
|
| 35 |
+
trainer.experiment_name='deepseek_llm_7b_model_rm' \
|
| 36 |
+
trainer.n_gpus_per_node=8 \
|
| 37 |
+
trainer.nnodes=1 \
|
| 38 |
+
trainer.save_freq=-1 \
|
| 39 |
+
trainer.test_freq=5 \
|
| 40 |
+
trainer.total_epochs=100 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek_math_gsm8k_megatron.sh
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
gsm8k_train_path=$HOME/data/gsm8k/train.parquet
|
| 4 |
+
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
|
| 5 |
+
math_train_path=$HOME/data/math/train.parquet
|
| 6 |
+
math_test_path=$HOME/data/math/test.parquet
|
| 7 |
+
|
| 8 |
+
train_files="['$gsm8k_train_path', '$math_train_path']"
|
| 9 |
+
test_files="['$gsm8k_test_path', '$math_test_path']"
|
| 10 |
+
|
| 11 |
+
python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\
|
| 12 |
+
data.train_files="$train_files" \
|
| 13 |
+
data.val_files="$test_files" \
|
| 14 |
+
data.train_batch_size=1024 \
|
| 15 |
+
data.max_prompt_length=1024 \
|
| 16 |
+
data.max_response_length=512 \
|
| 17 |
+
actor_rollout_ref.model.path=deepseek-ai/deepseek-coder-6.7b-instruct \
|
| 18 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 19 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 20 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 21 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
|
| 22 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
|
| 23 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 24 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 25 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
|
| 26 |
+
critic.optim.lr=1e-5 \
|
| 27 |
+
critic.model.path=deepseek-ai/deepseek-coder-6.7b-instruct \
|
| 28 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 29 |
+
critic.ppo_micro_batch_size_per_gpu=4 \
|
| 30 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 31 |
+
trainer.critic_warmup=0 \
|
| 32 |
+
trainer.logger=['console','wandb'] \
|
| 33 |
+
trainer.project_name='verl_megatron_math_gsm8k_examples' \
|
| 34 |
+
trainer.experiment_name='deepseek_llm_7b_function_rm' \
|
| 35 |
+
trainer.n_gpus_per_node=8 \
|
| 36 |
+
trainer.nnodes=1 \
|
| 37 |
+
trainer.save_freq=-1 \
|
| 38 |
+
trainer.test_freq=5 \
|
| 39 |
+
trainer.total_epochs=100 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_deepseek_megatron.sh
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
# prepare pre-trained model ckpt
|
| 4 |
+
huggingface-cli download deepseek-ai/deepseek-llm-7b-chat --local-dir $HOME/models/deepseek-llm-7b-chat
|
| 5 |
+
|
| 6 |
+
# ``actor_rollout_ref.rollout.tensor_model_parallel_size`` in theory could be different from
|
| 7 |
+
# ``**.megatron.tensor_model_parallel_size``
|
| 8 |
+
|
| 9 |
+
# the config file used: verl/trainer/main_ppo/config/ppo_megatron_trainer.yaml
|
| 10 |
+
|
| 11 |
+
python3 -m verl.trainer.main_ppo --config-path=config \
|
| 12 |
+
--config-name='ppo_megatron_trainer.yaml'\
|
| 13 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 14 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 15 |
+
data.train_batch_size=1024 \
|
| 16 |
+
data.max_prompt_length=512 \
|
| 17 |
+
data.max_response_length=512 \
|
| 18 |
+
actor_rollout_ref.model.path=$HOME/models/deepseek-llm-7b-chat \
|
| 19 |
+
actor_rollout_ref.actor.optim.lr=2e-6 \
|
| 20 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 21 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 22 |
+
actor_rollout_ref.actor.megatron.tensor_model_parallel_size=4 \
|
| 23 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
|
| 24 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 25 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 26 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
|
| 27 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
|
| 28 |
+
actor_rollout_ref.ref.megatron.tensor_model_parallel_size=4 \
|
| 29 |
+
critic.optim.lr=2e-5 \
|
| 30 |
+
critic.model.path=$HOME/models/deepseek-llm-7b-chat \
|
| 31 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 32 |
+
critic.ppo_micro_batch_size_per_gpu=4 \
|
| 33 |
+
critic.megatron.tensor_model_parallel_size=4 \
|
| 34 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 35 |
+
trainer.critic_warmup=0 \
|
| 36 |
+
trainer.logger=['console','wandb'] \
|
| 37 |
+
trainer.project_name='verl_megatron_gsm8k_examples' \
|
| 38 |
+
trainer.experiment_name='deepseek_llm_7b_function_rm' \
|
| 39 |
+
trainer.n_gpus_per_node=8 \
|
| 40 |
+
trainer.nnodes=1 \
|
| 41 |
+
trainer.save_freq=-1 \
|
| 42 |
+
trainer.total_epochs=15 \
|
| 43 |
+
+trainer.val_before_train=False $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_gemma.sh
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
python3 -m verl.trainer.main_ppo \
|
| 4 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 5 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 6 |
+
data.train_batch_size=512 \
|
| 7 |
+
data.max_prompt_length=1024 \
|
| 8 |
+
data.max_response_length=512 \
|
| 9 |
+
actor_rollout_ref.model.path=google/gemma-2-2b-it \
|
| 10 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 11 |
+
actor_rollout_ref.model.use_remove_padding=False \
|
| 12 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=128 \
|
| 13 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 14 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 15 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 16 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
|
| 17 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 18 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 19 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 20 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
|
| 21 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 22 |
+
critic.optim.lr=1e-5 \
|
| 23 |
+
critic.model.use_remove_padding=False \
|
| 24 |
+
critic.model.path=google/gemma-2-2b-it \
|
| 25 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 26 |
+
critic.ppo_micro_batch_size_per_gpu=4 \
|
| 27 |
+
critic.model.fsdp_config.param_offload=False \
|
| 28 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 29 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 30 |
+
trainer.critic_warmup=0 \
|
| 31 |
+
trainer.logger=['console','wandb'] \
|
| 32 |
+
trainer.project_name='verl_example' \
|
| 33 |
+
trainer.experiment_name='gemma2b_function_rm' \
|
| 34 |
+
trainer.n_gpus_per_node=2 \
|
| 35 |
+
trainer.nnodes=1 \
|
| 36 |
+
trainer.save_freq=-1 \
|
| 37 |
+
trainer.test_freq=10 \
|
| 38 |
+
trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b.sh
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
gsm8k_train_path=$HOME/data/gsm8k/train.parquet
|
| 4 |
+
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
|
| 5 |
+
math_train_path=$HOME/data/math/train.parquet
|
| 6 |
+
math_test_path=$HOME/data/math/test.parquet
|
| 7 |
+
|
| 8 |
+
train_files="['$gsm8k_train_path', '$math_train_path']"
|
| 9 |
+
test_files="['$gsm8k_test_path', '$math_test_path']"
|
| 10 |
+
|
| 11 |
+
python3 -m verl.trainer.main_ppo \
|
| 12 |
+
data.train_files="$train_files" \
|
| 13 |
+
data.val_files="$test_files" \
|
| 14 |
+
data.train_batch_size=1024 \
|
| 15 |
+
data.max_prompt_length=1024 \
|
| 16 |
+
data.max_response_length=512 \
|
| 17 |
+
actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \
|
| 18 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 19 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 20 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 21 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
|
| 22 |
+
actor_rollout_ref.model.enable_gradient_checkpointing=True \
|
| 23 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 24 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 25 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \
|
| 26 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 27 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 28 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
|
| 29 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \
|
| 30 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 31 |
+
critic.optim.lr=1e-5 \
|
| 32 |
+
critic.model.use_remove_padding=True \
|
| 33 |
+
critic.model.path=Qwen/Qwen2-7B-Instruct \
|
| 34 |
+
critic.model.enable_gradient_checkpointing=True \
|
| 35 |
+
critic.ppo_micro_batch_size_per_gpu=32 \
|
| 36 |
+
critic.model.fsdp_config.param_offload=False \
|
| 37 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 38 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 39 |
+
trainer.critic_warmup=0 \
|
| 40 |
+
trainer.logger=['console','wandb'] \
|
| 41 |
+
trainer.project_name='verl_example' \
|
| 42 |
+
trainer.experiment_name='Qwen2-7B-Instruct_function_rm' \
|
| 43 |
+
trainer.n_gpus_per_node=8 \
|
| 44 |
+
trainer.nnodes=1 \
|
| 45 |
+
trainer.save_freq=-1 \
|
| 46 |
+
trainer.test_freq=10 \
|
| 47 |
+
trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b_math_gsm8k_megatron.sh
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PPO training of Qwen2-7B-Instruct on GSM8K + MATH with the Megatron backend.
set -x

# vLLM + Qwen2-7B with flash_attn has known issues; force the xformers backend.
export VLLM_ATTENTION_BACKEND=XFORMERS

# Parquet datasets produced by the scripts in examples/data_preprocess/.
gsm8k_train_path=$HOME/data/gsm8k/train.parquet
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
math_train_path=$HOME/data/math/train.parquet
math_test_path=$HOME/data/math/test.parquet

# Hydra parses these as Python-style lists of file paths.
train_files="['$gsm8k_train_path', '$math_train_path']"
test_files="['$gsm8k_test_path', '$math_test_path']"

# All key=value arguments below are Hydra overrides of ppo_megatron_trainer.
# Trailing $@ lets callers append/override settings from the command line.
python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.train_batch_size=1024 \
    data.max_prompt_length=1024 \
    data.max_response_length=512 \
    actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    critic.optim.lr=1e-5 \
    critic.model.path=Qwen/Qwen2-7B-Instruct \
    critic.model.enable_gradient_checkpointing=False \
    critic.ppo_micro_batch_size_per_gpu=4 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.critic_warmup=0 \
    trainer.logger=['console','wandb'] \
    trainer.project_name='verl_megatron_math_gsm8k_examples' \
    trainer.experiment_name='qwen2_7b_function_rm' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.test_freq=5 \
    trainer.total_epochs=100 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b_rm.sh
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Disclaimer: the model used in the script is only for academic purpose.
# PPO training of Qwen2-7B-Instruct with a hybrid reward (function + learned RM).
set -x

# Data preparation scripts are available in ``examples/data_preprocess``.
# Example usage:
#
# python3 examples/data_preprocess/math_dataset.py --local_dir ~/data/math
# python3 examples/data_preprocess/gsm8k.py --local_dir ~/data/gsm8k

gsm8k_train_path=$HOME/data/gsm8k/train.parquet
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
math_train_path=$HOME/data/math/train.parquet
math_test_path=$HOME/data/math/test.parquet

# Hydra parses these as Python-style lists of file paths.
train_files="['$gsm8k_train_path', '$math_train_path']"
test_files="['$gsm8k_test_path', '$math_test_path']"

export VLLM_ATTENTION_BACKEND=XFORMERS # vllm + qwen2-7b with flash_attn has some issues

# prepare model ckpt
# Download actor/critic base model and the reward model concurrently.
huggingface-cli download Qwen/Qwen2-7B-Instruct --local-dir $HOME/models/Qwen2-7B-Instruct &
huggingface-cli download sfairXC/FsfairX-LLaMA3-RM-v0.1 --local-dir $HOME/models/FsfairX-LLaMA3-RM-v0.1 &
wait

# All key=value arguments below are Hydra overrides; trailing $@ lets callers
# append/override settings from the command line.
python3 -m verl.trainer.main_ppo \
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.train_batch_size=1024 \
    data.max_prompt_length=1024 \
    data.max_response_length=512 \
    data.return_raw_chat=True \
    actor_rollout_ref.model.path="$HOME/models/Qwen2-7B-Instruct" \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    critic.optim.lr=1e-5 \
    critic.model.use_remove_padding=True \
    critic.optim.lr_warmup_steps_ratio=0.05 \
    critic.model.path="$HOME/models/Qwen2-7B-Instruct" \
    critic.model.enable_gradient_checkpointing=True \
    critic.ppo_micro_batch_size_per_gpu=32 \
    critic.model.fsdp_config.param_offload=False \
    critic.model.fsdp_config.optimizer_offload=False \
    reward_model.enable=True \
    reward_model.model.path="$HOME/models/FsfairX-LLaMA3-RM-v0.1" \
    reward_model.model.use_remove_padding=True \
    reward_model.model.fsdp_config.param_offload=True \
    reward_model.micro_batch_size_per_gpu=32 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.critic_warmup=0 \
    trainer.logger=['console','wandb'] \
    trainer.project_name='verl_example' \
    +trainer.val_before_train=False \
    trainer.experiment_name='Qwen2-7B-Instruct_hybrid_rm' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.test_freq=5 \
    trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance.sh
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PPO training of Qwen2-7B-Instruct with a learned RM, using dynamic batch
# sizing / sequence packing (token-budget-based batching) instead of fixed
# micro batch sizes.
set -x

# Parquet datasets produced by the scripts in examples/data_preprocess/.
gsm8k_train_path=$HOME/data/gsm8k/train.parquet
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
math_train_path=$HOME/data/math/train.parquet
math_test_path=$HOME/data/math/test.parquet

# Hydra parses these as Python-style lists of file paths.
train_files="['$gsm8k_train_path', '$math_train_path']"
test_files="['$gsm8k_test_path', '$math_test_path']"

# All key=value arguments below are Hydra overrides; trailing $@ lets callers
# append/override settings from the command line.
python3 -m verl.trainer.main_ppo \
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.train_batch_size=4096 \
    data.max_prompt_length=4096 \
    data.max_response_length=4096 \
    data.return_raw_chat=True \
    actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=512 \
    actor_rollout_ref.actor.use_dynamic_bsz=True \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=24000 \
    critic.optim.lr=1e-5 \
    critic.model.use_remove_padding=True \
    critic.model.path=Qwen/Qwen2-7B-Instruct \
    critic.model.enable_gradient_checkpointing=True \
    critic.use_dynamic_bsz=True \
    critic.ppo_max_token_len_per_gpu=98304 \
    critic.model.fsdp_config.param_offload=False \
    critic.model.fsdp_config.optimizer_offload=False \
    reward_model.enable=True \
    reward_model.model.path=sfairXC/FsfairX-LLaMA3-RM-v0.1\
    reward_model.model.use_remove_padding=True \
    reward_model.model.fsdp_config.param_offload=True \
    reward_model.micro_batch_size_per_gpu=32 \
    reward_model.use_dynamic_bsz=True \
    reward_model.forward_max_token_len_per_gpu=98304 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.critic_warmup=0 \
    trainer.logger=['console','wandb'] \
    trainer.project_name='verl_example_gsm8k' \
    trainer.experiment_name='qwen2-7b_hybrid_rm_bsz8k_p4k_r4k_seq_packing' \
    trainer.n_gpus_per_node=8 \
    +trainer.val_before_train=False \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.test_freq=5 \
    trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2-7b_seq_balance.sh
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PPO training of Qwen2-7B-Instruct with a rule-based (function) reward, using
# dynamic batch sizing / sequence packing (token-budget-based batching).
set -x

# Parquet datasets produced by the scripts in examples/data_preprocess/.
gsm8k_train_path=$HOME/data/gsm8k/train.parquet
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
math_train_path=$HOME/data/math/train.parquet
math_test_path=$HOME/data/math/test.parquet

# Hydra parses these as Python-style lists of file paths.
train_files="['$gsm8k_train_path', '$math_train_path']"
test_files="['$gsm8k_test_path', '$math_test_path']"

# All key=value arguments below are Hydra overrides; trailing $@ lets callers
# append/override settings from the command line.
python3 -m verl.trainer.main_ppo \
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.train_batch_size=4096 \
    data.max_prompt_length=4096 \
    data.max_response_length=4096 \
    actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=512 \
    actor_rollout_ref.actor.use_dynamic_bsz=True \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=24000 \
    critic.optim.lr=1e-5 \
    critic.model.use_remove_padding=True \
    critic.model.path=Qwen/Qwen2-7B-Instruct \
    critic.model.enable_gradient_checkpointing=True \
    critic.ppo_max_token_len_per_gpu=98304 \
    critic.model.fsdp_config.param_offload=False \
    critic.model.fsdp_config.optimizer_offload=False \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.critic_warmup=0 \
    trainer.logger=['console','wandb'] \
    trainer.project_name='verl_example_gsm8k' \
    trainer.experiment_name='qwen2-7b_function_rm_bsz8k_p4k_r4k_seq_packing' \
    trainer.n_gpus_per_node=8 \
    +trainer.val_before_train=False \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.test_freq=5 \
    trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/run_qwen2.5-32b.sh
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PPO training of Qwen2.5-32B-Instruct on GSM8K + MATH across 4 nodes.
set -x

# Parquet datasets produced by the scripts in examples/data_preprocess/.
gsm8k_train_path=$HOME/data/gsm8k/train.parquet
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
math_train_path=$HOME/data/math/train.parquet
math_test_path=$HOME/data/math/test.parquet

# Hydra parses these as Python-style lists of file paths.
train_files="['$gsm8k_train_path', '$math_train_path']"
test_files="['$gsm8k_test_path', '$math_test_path']"

# All key=value arguments below are Hydra overrides; trailing $@ lets callers
# append/override settings from the command line.
# FIX: the original passed actor_rollout_ref.model.enable_gradient_checkpointing
# twice (False, then True). Hydra rejects duplicate overrides of the same key,
# so the script failed at startup. Keep a single occurrence (False, matching
# the explicit critic.model.enable_gradient_checkpointing=False below).
python3 -m verl.trainer.main_ppo \
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.train_batch_size=1024 \
    data.max_prompt_length=1024 \
    data.max_response_length=1024 \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-32B-Instruct \
    actor_rollout_ref.model.enable_gradient_checkpointing=False \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    critic.optim.lr=1e-5 \
    critic.model.use_remove_padding=True \
    critic.model.path=Qwen/Qwen2.5-32B-Instruct \
    critic.model.enable_gradient_checkpointing=False \
    critic.ppo_micro_batch_size_per_gpu=8 \
    critic.model.fsdp_config.param_offload=False \
    critic.model.fsdp_config.optimizer_offload=False \
    algorithm.kl_ctrl.kl_coef=0.0001 \
    trainer.critic_warmup=0 \
    trainer.logger=['console','wandb'] \
    trainer.project_name='verl_example' \
    trainer.experiment_name='Qwen2.5-32B-Instruct_function_rm' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=4 \
    trainer.save_freq=-1 \
    trainer.test_freq=10 \
    trainer.total_epochs=15 $@
|
deep_search/DeepResearcher/examples/ppo_trainer/verl_getting_started.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/DeepResearcher/examples/slurm/ray_on_slurm.slurm
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Launch a multi-node Ray cluster under Slurm (inside an Apptainer image) and
# run a small verl PPO demo job on the head node.
#SBATCH --job-name=verl-ray-on-slurm
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=1
#SBATCH --mem=200G
#SBATCH --partition=your-partition
#SBATCH --time=01:00:00
#SBATCH --account=your-account
#SBATCH --gpus-per-node=4
#SBATCH --cpus-per-task=64
#SBATCH --output=slurm-%j.out
#SBATCH --error=slurm-%j.err

# load necessary modules

# replace these information with your own
verl_workdir=/path/to/verl
train_files=/path/to/gsm8k/train.parquet
val_files=/path/to/gsm8k/test.parquet
apptainer_image_path=/path/to/verl-ngc.sif
# replace these information with your own

# Getting the node names
nodes=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
# FIX: must be unquoted so word splitting produces one array element per node.
# With the original quoted form, nodes_array had a single element and every
# worker iteration got an empty hostname (srun -w "").
nodes_array=($nodes)

head_node=${nodes_array[0]}
head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address)

# if we detect a space character in the head node IP, we'll
# convert it to an ipv4 address. This step is optional.
if [[ "$head_node_ip" == *" "* ]]; then
    IFS=' ' read -ra ADDR <<<"$head_node_ip"
    if [[ ${#ADDR[0]} -gt 16 ]]; then
        head_node_ip=${ADDR[1]}
    else
        head_node_ip=${ADDR[0]}
    fi
    echo "IPV6 address detected. We split the IPV4 address as $head_node_ip"
fi

port=6379
ip_head=$head_node_ip:$port
export ip_head
echo "IP Head: $ip_head"

# make sure we set environment variables before Ray initialization
export VLLM_ATTENTION_BACKEND=XFORMERS

printenv

echo "Starting HEAD at $head_node"
srun --nodes=1 --ntasks=1 -w "$head_node" \
    apptainer run --nv --bind $verl_workdir $apptainer_image_path \
    ray start --head --node-ip-address="$head_node_ip" --port=$port \
    --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &
# optional, though may be useful in certain versions of Ray < 1.0.
sleep 10

# number of nodes other than the head node
worker_num=$((SLURM_JOB_NUM_NODES - 1))

for ((i = 1; i <= worker_num; i++)); do
    node_i=${nodes_array[$i]}
    echo "Starting WORKER $i at $node_i"
    srun --nodes=1 --ntasks=1 -w "$node_i" \
        apptainer run --nv --bind $verl_workdir $apptainer_image_path \
        ray start --address "$ip_head" --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &
    sleep 5
done

# Submit the training driver on the head node; --overlap shares the node with
# the still-running ray head task. NOTE(review): trainer.nnodes=1 with
# n_gpus_per_node=4 only uses the head node's GPUs even though the Ray cluster
# spans 2 nodes — presumably a minimal demo; confirm before scaling up.
PYTHONUNBUFFERED=1 srun --overlap --nodes=1 --ntasks=1 -w "$head_node" \
    apptainer run --nv --bind $verl_workdir $apptainer_image_path \
    python3 -m verl.trainer.main_ppo \
    data.train_files=$train_files \
    data.val_files=$val_files \
    data.train_batch_size=256 \
    data.max_prompt_length=512 \
    data.max_response_length=256 \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    critic.optim.lr=1e-5 \
    critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    critic.ppo_micro_batch_size_per_gpu=4 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.logger=['console'] \
    +trainer.val_before_train=False \
    trainer.default_hdfs_dir=null \
    trainer.n_gpus_per_node=4 \
    trainer.nnodes=1 \
    trainer.save_freq=10 \
    trainer.test_freq=10 \
    trainer.total_epochs=15 2>&1 | tee verl_demo_slurm.log
|
deep_search/DeepResearcher/scripts/format.sh
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Format the repository's Python sources in place with yapf, using the
# project's .style.yapf configuration (-i in-place, -r recursive, -vv verbose).
pip3 install --upgrade yapf
python3 -m yapf -ir -vv --style ./.style.yapf verl tests single_controller examples
|
deep_search/DeepResearcher/scripts/model_merger.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import List, Tuple, Dict
|
| 16 |
+
import re
|
| 17 |
+
import os
|
| 18 |
+
import torch
|
| 19 |
+
import argparse
|
| 20 |
+
from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForTokenClassification, AutoModelForVision2Seq
|
| 21 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 22 |
+
from torch.distributed._tensor import DTensor, Shard, Placement
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def merge_by_placement(tensors: List[torch.Tensor], placement: Placement):
    """Combine the per-rank local shards of one parameter into a full tensor.

    Args:
        tensors: local tensors for one parameter, ordered by rank.
        placement: DTensor placement describing how the parameter was
            distributed along the mesh dimension.

    Returns:
        The merged tensor: shards are concatenated along the shard dimension;
        replicated parameters return the first copy.

    Raises:
        NotImplementedError: for partial (pending-reduction) placements.
        ValueError: for any other placement type.
    """
    if placement.is_shard():
        # Sharded: stitch the pieces back together along the shard dimension.
        return torch.cat(tensors, dim=placement.dim).contiguous()
    if placement.is_replicate():
        # Replicated: every rank holds an identical copy, so any one will do.
        return tensors[0]
    if placement.is_partial():
        raise NotImplementedError("Partial placement is not supported yet")
    raise ValueError(f"Unsupported placement: {placement}")
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_dir', required=True, type=str,
                        help="The path for your saved model")
    parser.add_argument("--hf_upload_path", default=None, type=str,
                        help="The path of the huggingface repo to upload")
    args = parser.parse_args()

    # `local_dir` must be the checkpoint directory itself; the merged model is
    # written to its `huggingface/` sub-directory.
    assert not args.local_dir.endswith("huggingface"), "The local_dir should not end with huggingface"
    local_dir = args.local_dir

    # Discover the FSDP world size from the rank-0 shard file name
    # (shards are named `model_world_size_{W}_rank_{R}.pt`).
    rank = 0
    world_size = 0
    for filename in os.listdir(local_dir):
        match = re.match(r"model_world_size_(\d+)_rank_0\.pt", filename)
        if match:
            world_size = int(match.group(1))  # FIX: was left as str
            break
    assert world_size, "No model file with the proper format"

    # Load the rank-0 shard and inspect one parameter to learn the device
    # mesh / sharding layout of the checkpoint.
    state_dict = torch.load(os.path.join(local_dir, f'model_world_size_{world_size}_rank_{rank}.pt'),
                            map_location='cpu')
    pivot_key = sorted(state_dict.keys())[0]
    weight = state_dict[pivot_key]
    assert isinstance(weight, DTensor)
    # get sharding info
    device_mesh = weight.device_mesh
    mesh = device_mesh.mesh
    mesh_dim_names = device_mesh.mesh_dim_names

    print(f'Got device mesh {mesh}, mesh_dim_names {mesh_dim_names}')

    # Only pure-FSDP checkpoints are supported for now.
    assert mesh_dim_names in (
        ('fsdp',),
    ), f'Unsupported mesh_dim_names {mesh_dim_names}'

    if 'tp' in mesh_dim_names:
        # fsdp * tp (kept for future support; unreachable with the assert above)
        total_shards = mesh.shape[-1] * mesh.shape[-2]
        mesh_shape = (mesh.shape[-2], mesh.shape[-1])
    else:
        # fsdp
        total_shards = mesh.shape[-1]
        mesh_shape = (mesh.shape[-1],)

    print(f'Processing model shards with {total_shards} {mesh_shape} in total')

    # Slot 0 already holds the rank-0 state dict; the remaining slots are
    # filled concurrently below. FIX: use None (not "") as the placeholder.
    model_state_dict_lst = [state_dict] + [None] * (total_shards - 1)

    def process_one_shard(shard_rank):
        # Load one shard's state dict into its slot of model_state_dict_lst.
        model_path = os.path.join(local_dir, f'model_world_size_{world_size}_rank_{shard_rank}.pt')
        shard_state_dict = torch.load(model_path, map_location='cpu', weights_only=False)
        model_state_dict_lst[shard_rank] = shard_state_dict
        return shard_state_dict

    with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor:
        # FIX: collect the futures and call result() so a failed load raises
        # here instead of being silently swallowed by submit().
        futures = [executor.submit(process_one_shard, r) for r in range(1, total_shards)]
        for future in futures:
            future.result()

    # Gather each parameter's local shards (bf16) across all ranks.
    state_dict = {}
    param_placements: Dict[str, List[Placement]] = {}
    keys = set(model_state_dict_lst[0].keys())
    for key in keys:
        state_dict[key] = []
        for model_state_dict in model_state_dict_lst:
            try:
                tensor = model_state_dict.pop(key)
            except KeyError:
                # FIX: was a bare `except:` that fell through and reused the
                # previous iteration's tensor; a shard missing a key is fatal.
                print("-" * 30)
                print(f"key {key} missing from a shard; remaining keys: {list(model_state_dict)}")
                raise
            if isinstance(tensor, DTensor):
                state_dict[key].append(tensor._local_tensor.bfloat16())
                placements = tuple(tensor.placements)
                # replicated placement at dp dimension can be discarded
                if mesh_dim_names[0] == 'dp':
                    placements = placements[1:]
                if key not in param_placements:
                    param_placements[key] = placements
                else:
                    assert param_placements[key] == placements
            else:
                # Non-DTensor entries are already full tensors; no merge needed.
                state_dict[key] = tensor.bfloat16()

    del model_state_dict_lst

    # Merge the collected shard lists into full tensors.
    for key in sorted(state_dict):
        if not isinstance(state_dict[key], list):
            print(f"No need to merge key {key}")
            continue
        # merge shards
        placements: Tuple[Shard] = param_placements[key]
        if len(mesh_shape) == 1:
            # 1-D list, FSDP without TP
            assert len(placements) == 1
            shards = state_dict[key]
            state_dict[key] = merge_by_placement(shards, placements[0])
        else:
            # 2-D list, FSDP + TP
            raise NotImplementedError("FSDP + TP is not supported yet")

    print('Writing to local disk')
    hf_path = os.path.join(local_dir, 'huggingface')
    config = AutoConfig.from_pretrained(hf_path)

    # Pick the matching Auto* class from the saved config's architecture name.
    if 'ForTokenClassification' in config.architectures[0]:
        auto_model = AutoModelForTokenClassification
    elif 'ForCausalLM' in config.architectures[0]:
        auto_model = AutoModelForCausalLM
    elif 'ForConditionalGeneration' in config.architectures[0]:
        auto_model = AutoModelForVision2Seq
    else:
        # FIX: was config["architectures"] — AutoConfig is not subscriptable,
        # so the error path itself raised TypeError.
        raise NotImplementedError(f'Unknown architecture {config.architectures}')

    # Build the model on the meta device (no weight allocation), then
    # materialize empty CPU storage; save_pretrained writes the merged weights.
    with torch.device('meta'):
        model = auto_model.from_config(config, torch_dtype=torch.bfloat16)
    model.to_empty(device='cpu')

    print(f'Saving model to {hf_path}')
    model.save_pretrained(hf_path, state_dict=state_dict)
    del state_dict
    del model

    if args.hf_upload_path:
        # Push to hugging face
        from huggingface_hub import HfApi
        api = HfApi()
        api.create_repo(repo_id=args.hf_upload_path, private=False, exist_ok=True)
        api.upload_folder(
            folder_path=hf_path,
            repo_id=args.hf_upload_path,
            repo_type="model"
        )
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
|
deep_search/DeepResearcher/signal/data.json
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"idx": 0,
|
| 4 |
+
"question": "what happened to ray lake of the real thing",
|
| 5 |
+
"think": " I need to find information about Ray Lake from \"The Real Thing\" to answer this question. I will start with a web search. ",
|
| 6 |
+
"tool_call": {
|
| 7 |
+
"arguments": {
|
| 8 |
+
"query": [
|
| 9 |
+
"Ray Lake The Real Thing",
|
| 10 |
+
"Ray Lake update",
|
| 11 |
+
"what happened to Ray Lake"
|
| 12 |
+
]
|
| 13 |
+
},
|
| 14 |
+
"name": "web_search"
|
| 15 |
+
},
|
| 16 |
+
"total_number": 1
|
| 17 |
+
}
|
| 18 |
+
]
|
deep_search/DeepResearcher/signal/data_read.json
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"content": [
|
| 4 |
+
{
|
| 5 |
+
"information": [
|
| 6 |
+
{
|
| 7 |
+
"page_number": 0,
|
| 8 |
+
"page_summary": "The webpage content does not provide any specific information about Ray Lake or what happened to him. It mainly consists of navigation links, language options, and a table of contents for the page about The Real Thing (British band). There is no direct mention of Ray Lake in this part of the webpage."
|
| 9 |
+
},
|
| 10 |
+
{
|
| 11 |
+
"page_number": 1,
|
| 12 |
+
"page_summary": "Ray Lake, a former member of The Real Thing, took his own life on 9 March 2000. He was suffering from a heroin addiction at the time of his death."
|
| 13 |
+
}
|
| 14 |
+
],
|
| 15 |
+
"url": "https://en.wikipedia.org/wiki/The_Real_Thing_(British_band)"
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"information": [],
|
| 19 |
+
"url": "https://www.facebook.com/photo.php?fbid=525317314757479&id=144513499504531&set=a.144764332812781"
|
| 20 |
+
},
|
| 21 |
+
{
|
| 22 |
+
"information": [
|
| 23 |
+
{
|
| 24 |
+
"page_number": 2,
|
| 25 |
+
"page_summary": "- The Real Thing was one of the most popular groups of the 1970s in the UK, with five top 10 hits.\n- Ray Lake, a member of The Real Thing, passed away in 2000.\n- The band originated from Liverpool and is considered one of Britain’s most successful black groups.\n- The group consisted of Chris Amoo, Dave Smith, Eddie Amoo (Chris's older brother who died in 2018), and Ray Lake.\n- In 2020, a feature documentary titled \"Everything – The Real Thing Story\" was released, directed by Simon Sheridan."
|
| 26 |
+
}
|
| 27 |
+
],
|
| 28 |
+
"url": "https://www.bbc.com/culture/article/20200211-the-real-thing-liverpools-alternative-fab-four"
|
| 29 |
+
},
|
| 30 |
+
{
|
| 31 |
+
"information": [],
|
| 32 |
+
"url": "https://www.bbc.com/news/articles/cm24ey4dv6eo"
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
"information": [
|
| 36 |
+
{
|
| 37 |
+
"page_number": 2,
|
| 38 |
+
"page_summary": "According to the webpage content, Ray Lake, a member of The Real Thing, passed away in 2000. Currently, the band is down to a duo, consisting of Chris Amoo and Dave Smith. Other members, such as Eddie (Chris Amoo's older brother), have also passed away, with Eddie's death occurring in 2018."
|
| 39 |
+
}
|
| 40 |
+
],
|
| 41 |
+
"url": "https://www.bbc.com/culture/article/20200211-the-real-thing-liverpools-alternative-fab-four"
|
| 42 |
+
},
|
| 43 |
+
{
|
| 44 |
+
"information": [],
|
| 45 |
+
"url": "https://www.facebook.com/photo.php?fbid=525317314757479&id=144513499504531&set=a.144764332812781"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"information": [
|
| 49 |
+
{
|
| 50 |
+
"page_number": 1,
|
| 51 |
+
"page_summary": "Ray Lake, a member of The Real Thing, sadly passed away in 2000."
|
| 52 |
+
}
|
| 53 |
+
],
|
| 54 |
+
"url": "https://www.smoothradio.com/features/the-real-thing-today-where-are-they-now/"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"information": [
|
| 58 |
+
{
|
| 59 |
+
"page_number": 0,
|
| 60 |
+
"page_summary": "The webpage content does not provide any specific information about Ray Lake or what happened to him. It primarily contains navigation links and a table of contents for the Wikipedia page on \"The Real Thing (British band).\" There are sections listed such as History, Group members, Discography, etc., which might contain relevant information on subsequent pages."
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"page_number": 1,
|
| 64 |
+
"page_summary": "Ray Lake, a former member of The Real Thing, took his own life on 9 March 2000. He was suffering from a heroin addiction at the time."
|
| 65 |
+
}
|
| 66 |
+
],
|
| 67 |
+
"url": "https://en.wikipedia.org/wiki/The_Real_Thing_(British_band)"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"information": [
|
| 71 |
+
{
|
| 72 |
+
"page_number": 0,
|
| 73 |
+
"page_summary": "Ray Lake, a founding member of The Real Thing, passed away in 2000. He was born on February 11, 1946, in Liverpool, Lancashire, England. Ray Lake is described as a haunting presence throughout the band's history, and he was known as \"One Take Lake.\" The Real Thing formed in 1970 by Chris Amoo, Dave Smith, Kenny Davis, and Ray Lake. After the departure of Kenny Davis, the band continued with Ray Lake as one of its key members until his death."
|
| 74 |
+
}
|
| 75 |
+
],
|
| 76 |
+
"url": "https://whsc.com.tw/wp-admin_bak/w0vv4/article.php?id=ray-lake-the-real-thing-daughter"
|
| 77 |
+
},
|
| 78 |
+
{
|
| 79 |
+
"information": [
|
| 80 |
+
{
|
| 81 |
+
"page_number": 2,
|
| 82 |
+
"page_summary": "Ray Lake, a member of The Real Thing, passed away in 2000. He was one of the original members of the band alongside Chris Amoo, Dave Smith, and Eddie Amoo. The Real Thing became the first all-black British band to have a UK number one single with their hit \"You To Me Are Everything\" in 1976. Ray Lake was represented by his family during a ceremony where the surviving members, Chris Amoo and Dave Smith, received the Roll of Citizen of Honour of the City of Liverpool."
|
| 83 |
+
}
|
| 84 |
+
],
|
| 85 |
+
"url": "https://www.bbc.com/news/articles/cm24ey4dv6eo"
|
| 86 |
+
}
|
| 87 |
+
],
|
| 88 |
+
"idx": 0,
|
| 89 |
+
"question": "what happened to ray lake of the real thing",
|
| 90 |
+
"think": " I need to find information about Ray Lake from \"The Real Thing\" to answer this question. I will start with a web search. ",
|
| 91 |
+
"tool_call": {
|
| 92 |
+
"arguments": {
|
| 93 |
+
"query": [
|
| 94 |
+
"Ray Lake The Real Thing",
|
| 95 |
+
"Ray Lake update",
|
| 96 |
+
"what happened to Ray Lake"
|
| 97 |
+
],
|
| 98 |
+
"url_list": [
|
| 99 |
+
"https://en.wikipedia.org/wiki/The_Real_Thing_(British_band)",
|
| 100 |
+
"https://whsc.com.tw/wp-admin_bak/w0vv4/article.php?id=ray-lake-the-real-thing-daughter",
|
| 101 |
+
"https://www.bbc.com/news/articles/cm24ey4dv6eo",
|
| 102 |
+
"https://www.bbc.com/culture/article/20200211-the-real-thing-liverpools-alternative-fab-four",
|
| 103 |
+
"https://www.smoothradio.com/features/the-real-thing-today-where-are-they-now/",
|
| 104 |
+
"https://www.facebook.com/photo.php?fbid=525317314757479&id=144513499504531&set=a.144764332812781"
|
| 105 |
+
]
|
| 106 |
+
},
|
| 107 |
+
"name": "browse_webpage"
|
| 108 |
+
},
|
| 109 |
+
"total_number": 1
|
| 110 |
+
}
|
| 111 |
+
]
|
deep_search/DeepResearcher/signal/data_search.json
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"content": [
|
| 4 |
+
{
|
| 5 |
+
"search_query": "Ray Lake The Real Thing",
|
| 6 |
+
"web_page_info_list": [
|
| 7 |
+
{
|
| 8 |
+
"quick_summary": "The Real Thing is a British soul group formed in the 1970s. The band charted internationally with their song \"You to Me Are Everything\"",
|
| 9 |
+
"title": "The Real Thing (British band) - Wikipedia",
|
| 10 |
+
"url": "https://en.wikipedia.org/wiki/The_Real_Thing_(British_band)"
|
| 11 |
+
},
|
| 12 |
+
{
|
| 13 |
+
"quick_summary": "Our dear brother RayLake passed away exactly 20 years ago today. He was the original voice of ChildrenOfTheGhetto. Much missed.",
|
| 14 |
+
"title": "The Real Thing - Our dear brother #RayLake passed away exactly ...",
|
| 15 |
+
"url": "https://www.facebook.com/photo.php?fbid=525317314757479&id=144513499504531&set=a.144764332812781"
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"quick_summary": "The band consisted of Chris and his friends Dave Smith, Ray Lake and Kenny Davis (and later joined by Edward Ankrah, younger brother of Joe Ankrah, from The ...",
|
| 19 |
+
"title": "The Real Thing - The Real Thing",
|
| 20 |
+
"url": "https://therealthingofficial.com/"
|
| 21 |
+
},
|
| 22 |
+
{
|
| 23 |
+
"quick_summary": "Worse yet was Ray Lake's decline into heroin addiction and, eventually, suicide. The documentary ends happily, though, with the band on stage ...",
|
| 24 |
+
"title": "Everything: The Real Thing Story review – the searing saga of ...",
|
| 25 |
+
"url": "https://www.theguardian.com/tv-and-radio/2020/aug/07/everything-the-real-thing-story"
|
| 26 |
+
},
|
| 27 |
+
{
|
| 28 |
+
"quick_summary": "Lake struggled with mental health issues and drug use, leading to his eventual departure from the band, and he was just 48 when he tragically ...",
|
| 29 |
+
"title": "The Real Thing: Liverpool's alternative Fab Four - BBC",
|
| 30 |
+
"url": "https://www.bbc.com/culture/article/20200211-the-real-thing-liverpools-alternative-fab-four"
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"quick_summary": "Find the perfect ray lake real thing stock photo, image, vector, illustration or 360 image. Available for both RF and RM licensing.",
|
| 34 |
+
"title": "Ray lake real thing hi-res stock photography and images - Alamy",
|
| 35 |
+
"url": "https://www.alamy.com/stock-photo/ray-lake-real-thing.html"
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"quick_summary": "He tells me that Ray Lake from the band died a couple of years ago, reportedly the victim of a drugs overdose. Dave Smith is still in the Real Thing, as is ...",
|
| 39 |
+
"title": "Children of the Ghetto; the Story of the Real Thing (Autumn 2002)",
|
| 40 |
+
"url": "http://www.davehaslam.com/children-of-the-ghetto-the-story-of-the-real-thing-autumn-2002/"
|
| 41 |
+
},
|
| 42 |
+
{
|
| 43 |
+
"quick_summary": "The only regret Chris has is that the band's manager Tony Hall and two original members of the Real Thing are no longer alive. Ray Lake died in 2000 aged 48 ...",
|
| 44 |
+
"title": "The Real Thing: getting by on luck – and talent - Steve Orme",
|
| 45 |
+
"url": "https://steveorme.co.uk/articles/the-real-thing-get-by-on-luck-and-talent/"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"quick_summary": "Lake's erratic behavior forces band members to fire him. Back in Liverpool, police arrest him for burgling houses. In 1992, Lake and his family relocate to ...",
|
| 49 |
+
"title": "Everything: The Real Thing Story - Films Media Group",
|
| 50 |
+
"url": "https://www.films.com/ecTitleDetail.aspx?TitleID=283085"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"quick_summary": "The other band members, Amoo's elder brother Eddie, who died six years ago, and Ray Lake, who died in 2000, were represented by their families.",
|
| 54 |
+
"title": "The Real Thing: Liverpool honours 1970s chart-topping soul stars",
|
| 55 |
+
"url": "https://www.bbc.com/news/articles/cm24ey4dv6eo"
|
| 56 |
+
}
|
| 57 |
+
]
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"search_query": "Ray Lake update",
|
| 61 |
+
"web_page_info_list": [
|
| 62 |
+
{
|
| 63 |
+
"quick_summary": "Ray Lake visitors can hike, ride bikes or horses, and cross-country ski on a 2.3-mile gravel trail that winds around wetlands and open prairies.",
|
| 64 |
+
"title": "Ray Lake - Lake County Forest Preserves",
|
| 65 |
+
"url": "https://www.lcfpd.org/ray-lake/"
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"quick_summary": "Trails Update. March 31, 2025. Johnson Branch. DORBA trails are OPEN. Isle du Bois/Greenbelt. All Isle du Bois Trails Closed",
|
| 69 |
+
"title": "Trails Update for Ray Roberts Lake - Texas Parks and Wildlife",
|
| 70 |
+
"url": "https://tpwd.texas.gov/state-parks/park-information/notices/ray-rob-trails-update"
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"quick_summary": "Ray Lake visitors can hike, ride bikes or horses, and cross-country ski on a 2.3-mile gravel trail that winds around wetlands and open prairies.",
|
| 74 |
+
"title": "Lake County, Illinois, CVB - Ray Lake Forest Preserve",
|
| 75 |
+
"url": "https://www.visitlakecounty.org/RayLakeForestPreserve"
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"quick_summary": "Couple miles SE of Ray Lake. Fremont County, WY. Acres: 9. Containment: 100%. Status: Inactive. Created by WildCAD • Mar 27, 2025 at 2:52 PM.",
|
| 79 |
+
"title": "Ray Lake Fire Map | Watch Duty",
|
| 80 |
+
"url": "https://app.watchduty.org/i/44940"
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"quick_summary": "Ray Lake boasts three wetlands totaling 120 acres, 80 acres of ... updated on March 30, 2025 at 2:43 am GMT. Download Open map. Explore · Countries ...",
|
| 84 |
+
"title": "Ray Lake Loop Trail, Illinois - 391 Reviews, Map | AllTrails",
|
| 85 |
+
"url": "https://www.alltrails.com/trail/us/illinois/ray-lake-loop-trail"
|
| 86 |
+
},
|
| 87 |
+
{
|
| 88 |
+
"quick_summary": "Rosensteel's Restaurant, Bar & Fishing Lakes, Formerly Ray's Lake. Charles Kalb yes we are open all year round. No stock yet. Weather been crazy ...",
|
| 89 |
+
"title": "Rosensteel's Restaurant, Bar & Fishing Lakes, Formerly Ray's Lake.",
|
| 90 |
+
"url": "https://www.facebook.com/rosensteels/posts/1068123448652605/"
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"quick_summary": "Latest News and Updates. Ray Roberts Lake is leased to Texas Parks and Wildlife and as partners we are working together toward the preservation and ...",
|
| 94 |
+
"title": "Ray Roberts Lake - Fort Worth District Water Management",
|
| 95 |
+
"url": "https://www.swf-wc.usace.army.mil/rayroberts/"
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"quick_summary": "Get current Ray-Lake-water-temperature water temperature, fishing reports, and lake conditions. Real-time updates on water temperature, clarity, and depth.",
|
| 99 |
+
"title": "Ray-Lake-water-temperature",
|
| 100 |
+
"url": "https://lakemonster.com/lake/Wyoming-lakes/Ray-Lake-water-temperature-2288"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"quick_summary": "Our Board of Commissioners approved the purchase of two new parcels of land as additions to Ray Lake Forest Preserve (Wauconda) -- 61.8 acres in March.",
|
| 104 |
+
"title": "Ray Lake Grows Again - Board & Committees - News",
|
| 105 |
+
"url": "https://www.lcfpd.org/ray-lake-grows-again/"
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"quick_summary": "2 reviews of RAY LAKE \"Glad to have this forest preserve near home for us to take our dogs for a great nature walk. Great place to jog or ride a bike ...",
|
| 109 |
+
"title": "Ray Lake - 23239 W Erhart Rd, Mundelein, Illinois - Yelp",
|
| 110 |
+
"url": "https://www.yelp.com/biz/ray-lake-mundelein"
|
| 111 |
+
}
|
| 112 |
+
]
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"search_query": "what happened to Ray Lake",
|
| 116 |
+
"web_page_info_list": [
|
| 117 |
+
{
|
| 118 |
+
"quick_summary": "Lake struggled with mental health issues and drug use, leading to his eventual departure from the band, and he was just 48 when he tragically ...",
|
| 119 |
+
"title": "The Real Thing: Liverpool's alternative Fab Four - BBC",
|
| 120 |
+
"url": "https://www.bbc.com/culture/article/20200211-the-real-thing-liverpools-alternative-fab-four"
|
| 121 |
+
},
|
| 122 |
+
{
|
| 123 |
+
"quick_summary": "Worse yet was Ray Lake's decline into heroin addiction and, eventually, suicide. The documentary ends happily, though, with the band on stage ...",
|
| 124 |
+
"title": "Everything: The Real Thing Story review – the searing saga of ...",
|
| 125 |
+
"url": "https://www.theguardian.com/tv-and-radio/2020/aug/07/everything-the-real-thing-story"
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"quick_summary": "Our dear brother RayLake passed away exactly 20 years ago today. He was the original voice of ChildrenOfTheGhetto. Much missed.",
|
| 129 |
+
"title": "The Real Thing - Our dear brother #RayLake passed away exactly ...",
|
| 130 |
+
"url": "https://www.facebook.com/photo.php?fbid=525317314757479&id=144513499504531&set=a.144764332812781"
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"quick_summary": "Kenny Davis left the group relatively early on, and member Ray Lake sadly passed away in 2000. From April, they were set to be embarking on ...",
|
| 134 |
+
"title": "The Real Thing: Where are they now? - Smooth",
|
| 135 |
+
"url": "https://www.smoothradio.com/features/the-real-thing-today-where-are-they-now/"
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"quick_summary": "71, remaining for only two weeks. Ray Lake took his own life on 9 March 2000. He was suffering from a heroin addiction. In 2002, Daft Punk's Thomas Bangalter ...",
|
| 139 |
+
"title": "The Real Thing (British band) - Wikipedia",
|
| 140 |
+
"url": "https://en.wikipedia.org/wiki/The_Real_Thing_(British_band)"
|
| 141 |
+
},
|
| 142 |
+
{
|
| 143 |
+
"quick_summary": "Ray Lake died in 2000 aged 48 from a drugs overdose while Chris's brother Eddy passed away suddenly in 2018 aged 74. Chris says it was very difficult being ...",
|
| 144 |
+
"title": "The Real Thing: getting by on luck – and talent - Steve Orme",
|
| 145 |
+
"url": "https://steveorme.co.uk/articles/the-real-thing-get-by-on-luck-and-talent/"
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"quick_summary": "Unfortunately, Ray Lake's personal problems were starting to create difficulties, and he left The Real Thing in 1991. He died nine years later, aged just 48.",
|
| 149 |
+
"title": "The Real Thing - The Real Thing",
|
| 150 |
+
"url": "https://therealthingofficial.com/"
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"quick_summary": "Kenny Davis left the group relatively early on, and member Ray Lake sadly passed away in 2000. A classic. Pye put a sizeable marketing budget ...",
|
| 154 |
+
"title": "ray lake the real thing daughter - 萬華運動中心",
|
| 155 |
+
"url": "https://whsc.com.tw/wp-admin_bak/w0vv4/article.php?id=ray-lake-the-real-thing-daughter"
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"quick_summary": "The other band members, Amoo's elder brother Eddie, who died six years ago, and Ray Lake, who died in 2000, were represented by their families.",
|
| 159 |
+
"title": "The Real Thing: Liverpool honours 1970s chart-topping soul stars",
|
| 160 |
+
"url": "https://www.bbc.com/news/articles/cm24ey4dv6eo"
|
| 161 |
+
}
|
| 162 |
+
]
|
| 163 |
+
}
|
| 164 |
+
],
|
| 165 |
+
"idx": 0,
|
| 166 |
+
"question": "what happened to ray lake of the real thing",
|
| 167 |
+
"think": " I need to find information about Ray Lake from \"The Real Thing\" to answer this question. I will start with a web search. ",
|
| 168 |
+
"tool_call": {
|
| 169 |
+
"arguments": {
|
| 170 |
+
"query": [
|
| 171 |
+
"Ray Lake The Real Thing",
|
| 172 |
+
"Ray Lake update",
|
| 173 |
+
"what happened to Ray Lake"
|
| 174 |
+
]
|
| 175 |
+
},
|
| 176 |
+
"name": "web_search"
|
| 177 |
+
},
|
| 178 |
+
"total_number": 1
|
| 179 |
+
}
|
| 180 |
+
]
|
deep_search/DeepResearcher/signal/signal.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"signal": 0
|
| 3 |
+
}
|
deep_search/DeepResearcher/tests/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
deep_search/DeepResearcher/tests/e2e/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/data/create_dataset.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create train/test parquet datasets of digit-completion prompts.

Enumerates every prompt of the toy ``DigitCompletion`` task, splits them
80/20 at random, wraps each prompt as a single-turn chat message, and
writes ``train.parquet`` / ``test.parquet`` next to this script.
"""

import os

import pandas as pd
from torch.utils import data

from tests.e2e.envs.digit_completion import DigitCompletion

if __name__ == '__main__':
    # Single-digit task: numbers, differences and response lengths all <= 9.
    simple_task = DigitCompletion(max_number=9, max_diff=9, max_num_in_response=9)
    all_prompts = simple_task.get_all_prompts()

    # 21 * 6 * 4 prompts in total; random 80/20 train/test split.
    train_split, test_split = data.random_split(all_prompts, lengths=[0.8, 0.2])

    # Each example is a one-message chat ([{'role': 'user', ...}]), the
    # format the PPO trainer's dataset loader expects in the 'prompt' column.
    train_prompts = [[{'role': 'user', 'content': str(item)}] for item in train_split]
    test_prompts = [[{'role': 'user', 'content': str(item)}] for item in test_split]

    print(f'Size of train: {len(train_prompts)}, size of test: {len(test_prompts)}')

    # Write the parquet files into the directory containing this script.
    output_dir = os.path.dirname(os.path.abspath(__file__))
    pd.DataFrame({'prompt': train_prompts}).to_parquet(os.path.join(output_dir, 'train.parquet'))
    pd.DataFrame({'prompt': test_prompts}).to_parquet(os.path.join(output_dir, 'test.parquet'))
deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/model/config.json
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"LlamaForCausalLM"
|
| 4 |
+
],
|
| 5 |
+
"attention_bias": false,
|
| 6 |
+
"attention_dropout": 0.0,
|
| 7 |
+
"bos_token_id": null,
|
| 8 |
+
"eos_token_id": 1,
|
| 9 |
+
"hidden_act": "silu",
|
| 10 |
+
"hidden_size": 128,
|
| 11 |
+
"initializer_range": 0.02,
|
| 12 |
+
"intermediate_size": 344,
|
| 13 |
+
"max_position_embeddings": 2048,
|
| 14 |
+
"mlp_bias": false,
|
| 15 |
+
"model_type": "llama",
|
| 16 |
+
"num_attention_heads": 4,
|
| 17 |
+
"num_hidden_layers": 4,
|
| 18 |
+
"num_key_value_heads": 4,
|
| 19 |
+
"pad_token_id": 2,
|
| 20 |
+
"pretraining_tp": 1,
|
| 21 |
+
"rms_norm_eps": 1e-06,
|
| 22 |
+
"rope_scaling": null,
|
| 23 |
+
"rope_theta": 10000.0,
|
| 24 |
+
"tie_word_embeddings": false,
|
| 25 |
+
"torch_dtype": "bfloat16",
|
| 26 |
+
"transformers_version": "4.43.3",
|
| 27 |
+
"use_cache": true,
|
| 28 |
+
"vocab_size": 16
|
| 29 |
+
}
|
deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/model/create_model_tokenizer.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Create a random model and tokenizer for PPO training
"""

import torch
import os
from transformers import AutoModelForCausalLM, LlamaConfig, AutoTokenizer

from tests.e2e.envs.digit_completion import CharTokenizer

# Jinja chat template: rejects a system turn, enforces strictly alternating
# user/assistant roles, and appends the separator token when a generation
# prompt is requested. Must stay in sync with tokenizer_config.json.
CHAT_TEMPLATE = "{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set role = message['role'] %}{{ message['content'] }}{% endfor %}{% if add_generation_prompt %}{{ sep_token }}{% endif %}"

# Character-level tokenizer over the digits plus the ',' and ':' separators
# used by the digit-completion task.
tokenizer = CharTokenizer(
    characters=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', ':'],
    model_max_length=2048,
    chat_template=CHAT_TEMPLATE,
)

# Round the vocabulary size up to a multiple of 16 for efficient embeddings.
padded_vocab_size = (tokenizer.vocab_size + 16 - 1) // 16 * 16

# A tiny 4-layer Llama so that the end-to-end test stays fast and cheap.
config = LlamaConfig(
    vocab_size=padded_vocab_size,
    hidden_size=128,
    intermediate_size=344,
    num_hidden_layers=4,
    num_attention_heads=4,
    num_key_value_heads=4,
    pad_token_id=tokenizer.pad_token_id,
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
)

model = AutoModelForCausalLM.from_config(config, torch_dtype=torch.bfloat16)

# Save model and tokenizer side by side in the directory of this script.
output_dir = os.path.dirname(os.path.abspath(__file__))
os.makedirs(output_dir, exist_ok=True)

model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)

# Round-trip check: reload via AutoTokenizer and render a left-padded prompt.
reloaded_tokenizer = AutoTokenizer.from_pretrained(output_dir)

chat = [{'role': 'user', 'content': '1,0:2,3'}]

reloaded_tokenizer.padding_side = 'left'
print(
    reloaded_tokenizer.apply_chat_template(
        chat,
        tokenize=True,
        add_generation_prompt=True,
        max_length=10,
        padding='max_length',
    ))
deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/model/generation_config.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_from_model_config": true,
|
| 3 |
+
"eos_token_id": 1,
|
| 4 |
+
"pad_token_id": 2,
|
| 5 |
+
"transformers_version": "4.43.3"
|
| 6 |
+
}
|
deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/model/tokenizer_config.json
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"char_ords": [
|
| 3 |
+
48,
|
| 4 |
+
49,
|
| 5 |
+
50,
|
| 6 |
+
51,
|
| 7 |
+
52,
|
| 8 |
+
53,
|
| 9 |
+
54,
|
| 10 |
+
55,
|
| 11 |
+
56,
|
| 12 |
+
57,
|
| 13 |
+
44,
|
| 14 |
+
58
|
| 15 |
+
],
|
| 16 |
+
"model_max_length": 2048,
|
| 17 |
+
"chat_template": "{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set role = message['role'] %}{{ message['content'] }}{% endfor %}{% if add_generation_prompt %}{{ sep_token }}{% endif %}"
|
| 18 |
+
}
|
deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/rl/README.md
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Digit completion
|
| 2 |
+
|
| 3 |
+
This is an example of solving a digit completion problem. The problem is defined as below:
|
| 4 |
+
|
| 5 |
+
The prompt is a sequence of numbers with fixed difference. The agent's goal is to complete the next N numbers.
|
| 6 |
+
If the max number is reached, the next number should be modulo with max number.
|
| 7 |
+
|
| 8 |
+
For example,
|
| 9 |
+
- prompt = [1, 2, 3]
|
| 10 |
+
- N = 5
|
| 11 |
+
- max_number = 6
|
| 12 |
+
|
| 13 |
+
The response should be [4, 5, 6, 7%6, 8%6] = [4, 5, 6, 0, 1].
|
| 14 |
+
|
| 15 |
+
# Environment definition
|
| 16 |
+
|
| 17 |
+
The core definition of the task is defined in verl/envs/digit_completion/task.py
|
| 18 |
+
|
| 19 |
+
It is highly recommended to take a look at it for better understanding.
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# Run experiments
|
| 24 |
+
|
| 25 |
+
Users must specify the config path and config name (and the model config path relative to the current working directory)
|
| 26 |
+
|
| 27 |
+
```bash
|
| 28 |
+
# cd examples/arithmetic_sequence/rl
|
| 29 |
+
|
| 30 |
+
# Specify the config path and config name (current working dir)
|
| 31 |
+
python3 -m verl.trainer.ppo.ray_megatron_train_synchronous --config-path=$(pwd)/config --config-name='ray_megatron'
|
| 32 |
+
|
| 33 |
+
# The default relative path of model config is 'config/model_config', if you want to change it, you can rewrite it in ray_megatron.yaml or using:
|
| 34 |
+
python3 -m verl.trainer.ppo.ray_megatron_train_synchronous --config-path=$(pwd)/config --config-name='ray_megatron' ++model.base_path=config/model_config
|
| 35 |
+
|
| 36 |
+
```
|
| 37 |
+
|
deep_search/DeepResearcher/tests/e2e/arithmetic_sequence/rl/main_trainer.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""
|
| 15 |
+
Using FSDPTrainer
|
| 16 |
+
"""
|
| 17 |
+
import os
|
| 18 |
+
import hydra
|
| 19 |
+
import ray
|
| 20 |
+
import torch
|
| 21 |
+
from transformers import PreTrainedTokenizer, AutoTokenizer
|
| 22 |
+
|
| 23 |
+
from verl import DataProto
|
| 24 |
+
from verl.trainer.ppo.ray_trainer import RayPPOTrainer
|
| 25 |
+
from verl.utils.fs import copy_to_local
|
| 26 |
+
from tests.e2e.envs.digit_completion import CharTokenizer
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def make_reward_function(tokenizer, num_examine):
    """Build a dense reward function for the arithmetic-sequence task.

    Args:
        tokenizer: tokenizer used to decode prompt/response ids; must expose
            ``sep_token`` and ``eos_token`` (the test CharTokenizer does).
        num_examine: number of decoded (prompt, response) pairs to print per
            batch, for eyeballing rollouts during training.

    Returns:
        A callable mapping a DataProto batch to a per-token reward tensor of
        the same shape as ``batch['responses']``.
    """

    def arithmetic_sequence_reward_function(data: DataProto):
        # Imported lazily so the module can be loaded without the test env on sys.path.
        from tests.e2e.envs.digit_completion.task import compute_reward
        reward_tensor = torch.zeros_like(data.batch['responses'], dtype=torch.float32)

        for i in range(data.batch.batch_size[0]):
            data_item = data[i]  # DataProtoItem

            prompt_ids = data_item.batch['prompts']

            prompt_length = prompt_ids.shape[-1]

            # extract raw prompt
            # NOTE(review): assumes prompts are left-padded, so the valid prompt
            # tokens sit at the right end of the prompt slice — confirm with the
            # data pipeline that produces 'prompts'/'attention_mask'.
            valid_prompt_length = data_item.batch['attention_mask'][:prompt_length].sum()
            valid_prompt_ids = prompt_ids[-valid_prompt_length:]

            # extract response
            # Responses are assumed right-padded: valid tokens first, padding after.
            response_ids = data_item.batch['responses']
            response_length = response_ids.shape[-1]
            response_mask = data.batch['attention_mask'][i][-response_length:]
            valid_response_length = data_item.batch['attention_mask'][prompt_length:].sum()
            valid_response_ids = response_ids[:valid_response_length]

            # decode
            prompt = tokenizer.decode(valid_prompt_ids)
            response = tokenizer.decode(valid_response_ids)
            # remove bos and eos markers so compute_reward sees the bare strings
            prompt = prompt.replace(tokenizer.sep_token, '')
            response = response.replace(tokenizer.eos_token, '')
            if i < num_examine:
                print(prompt, response)

            # compute_reward returns (per-char dense reward array, info dict).
            reward_output = compute_reward(prompt, response)
            dense_reward = reward_output[0].tolist()
            ground_truth_response = reward_output[1]['ground_truth_response']
            if len(dense_reward) > 0:
                last_reward = dense_reward[-1]
            else:
                # Empty reward vector: reward 1 iff an empty response was the
                # correct answer (ground truth is also empty).
                if len(ground_truth_response) == 0:
                    last_reward = 1
                else:
                    last_reward = 0

            # pad to response_length by repeating the final per-step reward
            for _ in range(reward_tensor.shape[-1] - len(dense_reward)):
                dense_reward.append(last_reward)

            dense_reward = torch.as_tensor(dense_reward, dtype=torch.float32, device=reward_tensor.device)
            # Zero out reward on padding positions of the response.
            reward_tensor[i] = dense_reward * response_mask

        return reward_tensor

    return arithmetic_sequence_reward_function
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@hydra.main(config_path='../../../../verl/trainer/config', config_name='ppo_trainer', version_base=None)
def main(config):
    """E2E entry point: run FSDP-based PPO on the arithmetic-sequence task.

    ``config`` is the hydra-resolved ppo_trainer config; CLI overrides are
    applied by hydra before this function is called.
    """
    ray.init(
        runtime_env={
            'env_vars': {
                'MEGATRON_USE_CUDA_TIMER': '0',
                'MEGATRON_START_PROCESS_TIMER': 'False',
                'TOKENIZERS_PARALLELISM': 'true',
                'NCCL_DEBUG': 'WARN'
            }
        })

    # print initial config
    from pprint import pprint
    from omegaconf import OmegaConf
    pprint(OmegaConf.to_container(config, resolve=True))  # resolve=True will eval symbol values

    # NOTE(review): this second print is identical to the first — nothing
    # normalizes batch sizes in between here (upstream verl mutates the config
    # between these two prints); confirm whether the duplicate is intentional.
    print('Config after normalizing batch_size')
    pprint(OmegaConf.to_container(config, resolve=True))  # resolve=True will eval symbol values

    # download the checkpoint from hdfs (no-op for local paths)
    local_path = copy_to_local(config.actor_rollout_ref.model.path)
    local_path = os.path.expanduser(local_path)
    # instantiate tokenizer (resolves to CharTokenizer via AutoTokenizer.register)
    tokenizer = AutoTokenizer.from_pretrained(local_path)
    print(f'Tokenizer vocab_size: {tokenizer.vocab_size}')

    # define worker classes
    from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker
    from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role

    role_worker_mapping = {
        Role.ActorRollout: ray.remote(ActorRolloutRefWorker),
        Role.Critic: ray.remote(CriticWorker),
    }

    # Single resource pool spanning all configured GPUs; both roles share it.
    global_pool_id = 'global_pool'
    resource_pool_spec = {
        global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
    }
    mapping = {
        Role.ActorRollout: global_pool_id,
        Role.Critic: global_pool_id,
    }

    # Same reward function is used for train and validation.
    reward_fn = make_reward_function(tokenizer=tokenizer, num_examine=1)

    resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)

    trainer = RayPPOTrainer(config=config,
                            tokenizer=tokenizer,
                            role_worker_mapping=role_worker_mapping,
                            resource_pool_manager=resource_pool_manager,
                            reward_fn=reward_fn,
                            val_reward_fn=reward_fn)
    trainer.init_workers()
    trainer.fit()
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
if __name__ == '__main__':
    # Hydra supplies the config (see the @hydra.main decorator on main).
    main()
|
deep_search/DeepResearcher/tests/e2e/check_results.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import argparse
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def extract_reward_from_line(line):
    """Extract the 'critic/rewards/mean' metric from one training-log line.

    The line is expected to be " - "-separated "key:value" pairs, e.g.
    ``"step:3 - critic/rewards/mean:0.53 - actor/lr:1e-6"``.

    Args:
        line: one raw line of the training log.

    Returns:
        The metric as a float, or ``-np.inf`` when the metric is missing or
        its value is not parseable.
    """
    for key_val in line.split(' - '):
        # partition (split on the FIRST ':') tolerates values that themselves
        # contain ':' (e.g. timestamps); the old `key, val = key_val.split(':')`
        # raised on such pairs and the broad except then dropped the whole line.
        key, sep, val = key_val.partition(':')
        if sep and key == 'critic/rewards/mean':
            try:
                return float(val)
            except ValueError:
                # Metric present but malformed — treat as absent.
                return -np.inf
    return -np.inf
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--output_file', required=True, type=str)
    cli_args = arg_parser.parse_args()

    with open(cli_args.output_file, 'r') as log_file:
        log_lines = log_file.read().split('\n')

    # Best (maximum) mean reward over all logged training-step lines;
    # -inf when the log contains no parseable step lines at all.
    best_reward = max(
        (extract_reward_from_line(log_line) for log_line in log_lines if log_line.startswith('step')),
        default=-np.inf,
    )

    print(f'Best reward is {best_reward}')
    assert best_reward > 0.2, f'Best reward must be greater than 0.2. best_reward: {best_reward}'
    print('Check passes')
|
deep_search/DeepResearcher/tests/e2e/envs/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from .digit_completion import DigitCompletion
|
| 16 |
+
|
| 17 |
+
__all__ = ['DigitCompletion']
|
deep_search/DeepResearcher/tests/e2e/envs/digit_completion/__init__.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from .task import DigitCompletion, generate_ground_truth_response
from .tokenizer import CharTokenizer

from transformers import AutoTokenizer, LlamaConfig

# Register the char-level tokenizer so AutoTokenizer.from_pretrained() resolves
# to CharTokenizer for checkpoints whose model config is a LlamaConfig
# (the tiny test models used in these e2e tests).
AutoTokenizer.register(LlamaConfig, CharTokenizer, exist_ok=True)

__all__ = ['DigitCompletion', 'generate_ground_truth_response', 'CharTokenizer']
|
deep_search/DeepResearcher/tests/e2e/envs/digit_completion/task.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Task and environment definition for digit completion."""
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class DigitCompletion(object):
    """A toy arithmetic-sequence completion task.

    A prompt encodes two consecutive terms of an arithmetic sequence together
    with the modulus and the number of terms to complete; the expected response
    is the continuation of the sequence, every term reduced modulo max_number.

    For example, with max_number=6 the continuation of [1, 2, 3] for 5 more
    terms is [4, 5, 0, 1, 2] (each term taken mod 6).

    Note that the tokenizer is char-level to increase the difficulty.
    """

    def __init__(self, max_number: int, max_diff: int, max_num_in_response: int, seed=0):
        """
        Args:
            max_number: the maximum number allowed in the arithmetic sequence
                (terms are reduced modulo this value).
            max_diff: the maximum diff. The actual common diff is sampled
                from [0, max_diff].
            max_num_in_response: the maximum number of terms in the response.
            seed: seed for the internal NumPy generator, for reproducibility.
        """
        super().__init__()
        self.max_number = max_number
        self.max_diff = max_diff
        self.max_num_in_response = max_num_in_response
        assert self.max_num_in_response < 10
        assert self.max_number > 0
        assert self.max_diff > 0
        self.max_number_length = len(str(max_number))
        # Prompt layout: "{num1},{num2}:{max_number},{num_to_complete}"
        # (three numbers of up to max_number_length digits plus ',', ':', ','
        # and one digit for num_to_complete; no negatives allowed).
        self._prompt_length = self.max_number_length * 2 + 4 + self.max_number_length

        self.np_rng = np.random.default_rng(seed=seed)

    def __str__(self):
        return (f'Prompt length: {self.prompt_length}. Response length: {self.response_length}, '
                f'Max number: {self.max_number}. Max diff: {self.max_diff}, '
                f'Max number in response: {self.max_num_in_response}')

    def get_state(self):
        # The RNG is the only mutable state; everything else is fixed at init.
        return {'rng': self.np_rng}

    def set_state(self, state):
        assert 'rng' in state, 'rng must be inside state'
        self.np_rng = state['rng']

    @property
    def prompt_length(self):
        return self._prompt_length

    @property
    def response_length(self):
        # Worst case: max_num_in_response numbers plus separating commas plus
        # [EOS], doubled to leave slack (e.g. for stray 'U' unknown tokens).
        return (self.max_num_in_response * self.max_number_length + (self.max_num_in_response - 1) + 1) * 2

    def add(self, a, b):
        # Modular addition keeps every term inside [0, max_number).
        return (a + b) % self.max_number

    def get_all_prompts(self):
        """Enumerate every prompt over (first term, common diff, count)."""
        return [
            f'{start},{self.add(start, step)}:{self.max_number},{count}'
            for start in range(self.max_number + 1)
            for step in range(0, self.max_diff + 1)
            for count in range(self.max_num_in_response + 1)
        ]

    def sample_str_prompts(self):
        """Draw one random prompt (consumes three values from the RNG)."""
        start = self.np_rng.integers(self.max_number + 1)
        step = self.np_rng.integers(self.max_diff + 1)
        follower = self.add(start, step)
        count = self.np_rng.integers(self.max_num_in_response + 1)
        return f'{start},{follower}:{self.max_number},{count}'

    def sample_batch_str_prompts(self, batch_size):
        """Draw ``batch_size`` independent random prompts."""
        return [self.sample_str_prompts() for _ in range(batch_size)]
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def compute_attention_mask(prompts, pad_token_id):
    """Return a mask over ``prompts``: 1 for real tokens, 0 where padded.

    Assumes ``prompts`` is a NumPy ndarray of token ids; the result has the
    same shape and dtype.
    """
    # Elementwise comparison gives the boolean mask directly; cast back to the
    # token array's dtype so the output matches np.ones_like semantics.
    return (prompts != pad_token_id).astype(prompts.dtype)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def compute_position_id_with_mask(mask):
    """Derive position ids from an attention mask.

    Each unmasked token gets its 0-based position among unmasked tokens along
    the last axis; leading masked positions are floored at 0.
    """
    # cumsum - 1 yields -1 before the first real token; clamp those to zero.
    return np.maximum(np.cumsum(mask, axis=-1) - 1, 0)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def generate_ground_truth_response(prompt: str):
    """Generate the ground-truth continuation for a prompt.

    The prompt has the form "{num1},{num2}:{max_number},{num_to_gen}"; the
    answer is the next ``num_to_gen`` terms of the arithmetic sequence, each
    reduced modulo ``max_number`` and joined by commas.
    """
    pair_part, spec_part = prompt.split(':')
    first_str, second_str = pair_part.split(',')
    modulus_str, count_str = spec_part.split(',')
    second = int(second_str)
    modulus = int(modulus_str)
    # Recover the common difference modulo the sequence's modulus.
    step = (second - int(first_str)) % modulus
    terms = []
    current = second
    for _ in range(int(count_str)):
        current = (current + step) % modulus
        terms.append(str(current))
    return ','.join(terms)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def compute_reward(prompt: str, response: str, sequence_reward=1.):
    """Compute a dense per-character reward so RL can train without SFT.

    Characters of ``response`` are matched greedily against the ground-truth
    continuation; each correct character (up to the first mismatch) earns an
    equal share of ``sequence_reward``. Assumes each char is one token.

    Returns:
        (reward array of len(response) float32, info dict with
        'ground_truth_response').
    """
    expected = generate_ground_truth_response(prompt)
    # Spread the sequence reward uniformly over every expected char plus [EOS].
    unit = sequence_reward / (len(expected) + 1)

    scores = np.zeros(len(response), dtype=np.float32)
    matched = 0
    for pos, char in enumerate(response):
        # Stop once the ground truth is exhausted or the response diverges;
        # everything after the first mismatch earns nothing.
        if matched == len(expected) or char != expected[matched]:
            break
        scores[pos] = unit
        matched += 1

    return scores, {'ground_truth_response': expected}
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
if __name__ == '__main__':
    # Smoke test: print a random prompt and a few reward computations.
    task = DigitCompletion(max_number=20, max_diff=3, max_num_in_response=5)
    print(task.sample_str_prompts())

    # Zero terms requested and empty response: reward vector is empty.
    prompt = '7,8:20,0'
    response = ''
    print(compute_reward(prompt, response))

    # Wrong response ('E000') against an empty ground truth: all-zero reward.
    prompt = '7,8:20,0'
    response = 'E000'
    print(compute_reward(prompt, response))

    # Correct prefix '11,12' earns per-char reward; the trailing ',13' does not.
    prompt = '9,10:20,2'
    response = '11,12,13'
    print(compute_reward(prompt, response))
|
deep_search/DeepResearcher/tests/e2e/envs/digit_completion/tokenizer.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Copied from https://github.com/dariush-bahrami/character-tokenizer/blob/master/charactertokenizer/core.py
|
| 15 |
+
|
| 16 |
+
CharacterTokenzier for Hugging Face Transformers.
|
| 17 |
+
|
| 18 |
+
This is heavily inspired from CanineTokenizer in transformers package.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
import json
|
| 22 |
+
import os
|
| 23 |
+
from pathlib import Path
|
| 24 |
+
from typing import Dict, List, Optional, Sequence, Union
|
| 25 |
+
|
| 26 |
+
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class CharTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for the digit-completion e2e tests.

    Adapted from dariush-bahrami/character-tokenizer and heavily inspired by
    CanineTokenizer in the transformers package. Every character is one token.
    """

    def __init__(self, characters: Sequence[str], model_max_length: int, chat_template, **kwargs):
        """Character tokenizer for Hugging Face transformers.

        Args:
            characters (Sequence[str]): List of desired characters. Any
                character not in this list is replaced by the unknown token
                'U'. The special tokens and their ids are:
                    'S' (sep): 0
                    'E' (eos): 1
                    'P' (pad): 2
                    'U' (unk): 3
                Each character in ``characters`` is then assigned an id
                starting at 4, in order.

            model_max_length (int): Model maximum sequence length.
            chat_template: chat template string stored on the tokenizer.
        """
        eos_token_str = 'E'
        sep_token_str = 'S'
        pad_token_str = 'P'
        unk_token_str = 'U'

        self.characters = characters
        self.model_max_length = model_max_length
        eos_token = AddedToken(eos_token_str, lstrip=False, rstrip=False)
        sep_token = AddedToken(sep_token_str, lstrip=False, rstrip=False)
        pad_token = AddedToken(pad_token_str, lstrip=False, rstrip=False)
        unk_token = AddedToken(unk_token_str, lstrip=False, rstrip=False)

        # Vocab must exist before super().__init__, which may tokenize the
        # special tokens via the methods below.
        self._vocab_str_to_int = {
            sep_token_str: 0,
            eos_token_str: 1,
            pad_token_str: 2,
            unk_token_str: 3,
            **{
                ch: i + 4 for i, ch in enumerate(characters)
            },
        }
        self._vocab_int_to_str = {v: k for k, v in self._vocab_str_to_int.items()}

        super().__init__(
            eos_token=eos_token,
            sep_token=sep_token,
            pad_token=pad_token,
            unk_token=unk_token,
            add_prefix_space=False,
            model_max_length=model_max_length,
            **kwargs,
        )

        self.chat_template = chat_template

    @property
    def vocab_size(self) -> int:
        return len(self._vocab_str_to_int)

    def get_vocab(self):
        return self._vocab_str_to_int

    def _tokenize(self, text: str) -> List[str]:
        # Char-level: every character is its own token.
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        # Unknown characters map to the 'U' (unk) id.
        return self._vocab_str_to_int.get(token, self._vocab_str_to_int["U"])

    def _convert_id_to_token(self, index: int) -> str:
        return self._vocab_int_to_str[index]

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self,
                                         token_ids_0: List[int],
                                         token_ids_1: Optional[List[int]] = None) -> List[int]:
        # NOTE(review): no cls_token is configured in __init__, so
        # self.cls_token_id is None here — confirm this method is actually
        # exercised before relying on its output.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True,
            )

        # Mirrors build_inputs_with_special_tokens: 1 marks the added specials.
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def get_config(self) -> Dict:
        """Serialize the tokenizer as a JSON-safe dict (chars stored as ords)."""
        return {
            "char_ords": [ord(ch) for ch in self.characters],
            "model_max_length": self.model_max_length,
            "chat_template": self.chat_template
        }

    @classmethod
    def from_config(cls, config: Dict) -> "CharTokenizer":
        """Inverse of get_config()."""
        cfg = {}
        cfg["characters"] = [chr(i) for i in config["char_ords"]]
        cfg["model_max_length"] = config["model_max_length"]
        cfg["chat_template"] = config["chat_template"]
        return cls(**cfg)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        # Only the config file is written; there is no separate vocab file.
        cfg_file = Path(save_directory) / "tokenizer_config.json"
        cfg = self.get_config()
        with open(cfg_file, "w") as f:
            json.dump(cfg, f, indent=4)

    @classmethod
    def from_pretrained(cls, save_directory: Union[str, os.PathLike], **kwargs):
        cfg_file = Path(save_directory) / "tokenizer_config.json"
        with open(cfg_file) as f:
            cfg = json.load(f)
        return cls.from_config(cfg)
|
deep_search/DeepResearcher/tests/e2e/run_deepseek_megatron.sh
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x

# E2E CI script: 3-step PPO run with the Megatron backend on GSM8K using
# deepseek-coder-1.3b-instruct (TP=2 for actor, rollout, ref and critic).
# the config file used: verl/trainer/main_ppo/config/ppo_megatron_trainer.yaml

# Pre-fetch the model into the local HF cache so the trainer doesn't download.
huggingface-cli download deepseek-ai/deepseek-coder-1.3b-instruct

# Extra CLI overrides can be appended when invoking this script (trailing $@).
python3 -m verl.trainer.main_ppo --config-path=config \
    --config-name='ppo_megatron_trainer.yaml'\
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    data.train_batch_size=1024 \
    data.max_prompt_length=512 \
    data.max_response_length=512 \
    actor_rollout_ref.model.path=deepseek-ai/deepseek-coder-1.3b-instruct \
    actor_rollout_ref.actor.optim.lr=2e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \
    critic.optim.lr=2e-5 \
    critic.model.path=deepseek-ai/deepseek-coder-1.3b-instruct \
    critic.model.enable_gradient_checkpointing=False \
    critic.ppo_micro_batch_size_per_gpu=4 \
    critic.megatron.tensor_model_parallel_size=2 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.critic_warmup=0 \
    trainer.logger=['console'] \
    trainer.project_name='verl_megatron_gsm8k_examples' \
    trainer.experiment_name='deepseek_llm_1b3_function_rm' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.test_freq=1 \
    trainer.total_epochs=15 \
    trainer.total_training_steps=3 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen2vl_geo3k_function_rm.sh
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x

# E2E CI script: single-step GRPO run on the Geometry3K multimodal dataset
# with Qwen2-VL-2B-Instruct (FSDP backend, vLLM rollout, function reward).

# Qwen2-VL requires the XFORMERS attention backend under vLLM.
export VLLM_ATTENTION_BACKEND=XFORMERS

# Extra CLI overrides can be appended when invoking this script (trailing $@).
python3 -m verl.trainer.main_ppo \
    data.train_files=$HOME/data/geo3k/train.parquet \
    data.val_files=$HOME/data/geo3k/test.parquet \
    data.train_batch_size=128 \
    data.max_prompt_length=1536 \
    data.max_response_length=1536 \
    data.image_key=images \
    actor_rollout_ref.model.path=Qwen/Qwen2-VL-2B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=128 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.rollout.enable_chunked_prefill=False \
    actor_rollout_ref.rollout.enforce_eager=True \
    actor_rollout_ref.rollout.free_cache_engine=False \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    algorithm.kl_ctrl.kl_coef=0.001 \
    algorithm.adv_estimator=grpo \
    trainer.critic_warmup=0 \
    trainer.logger=['console'] \
    trainer.project_name='verl_example_geo3k' \
    trainer.experiment_name='qwen2vl_e2e_ci_function_rm' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.total_training_steps=1 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_function_rm.sh
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x

# E2E CI script: single-step PPO run on GSM8K with Qwen2.5-0.5B
# (FSDP backend, vLLM rollout, function reward). Saves a checkpoint
# (save_freq=1) to $HOME/ckpt/ so downstream checkpoint tests can use it.

export VLLM_ATTENTION_BACKEND=XFORMERS

# Extra CLI overrides can be appended when invoking this script (trailing $@).
python3 -m verl.trainer.main_ppo \
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    data.train_batch_size=1024 \
    data.max_prompt_length=512 \
    data.max_response_length=512 \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    critic.optim.lr=1e-5 \
    critic.model.use_remove_padding=True \
    critic.model.path=Qwen/Qwen2.5-0.5B \
    critic.model.enable_gradient_checkpointing=False \
    critic.ppo_micro_batch_size_per_gpu=4 \
    critic.model.fsdp_config.param_offload=False \
    critic.model.fsdp_config.optimizer_offload=False \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.critic_warmup=0 \
    trainer.logger=['console'] \
    trainer.project_name='verl_example_gsm8k' \
    trainer.experiment_name='qwen_e2e_ci_function_rm' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1 \
    trainer.save_freq=1 \
    trainer.default_local_dir=$HOME/ckpt/ \
    trainer.total_training_steps=1 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_function_rm_grpo.sh
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 7 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 8 |
+
data.train_batch_size=1024 \
|
| 9 |
+
data.max_prompt_length=512 \
|
| 10 |
+
data.max_response_length=512 \
|
| 11 |
+
actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
|
| 12 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 13 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 14 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 15 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 16 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 17 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 18 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
|
| 19 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 20 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 21 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 22 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
|
| 23 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 24 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 25 |
+
algorithm.adv_estimator=grpo \
|
| 26 |
+
trainer.critic_warmup=0 \
|
| 27 |
+
trainer.logger=['console'] \
|
| 28 |
+
trainer.project_name='verl_example_gsm8k' \
|
| 29 |
+
trainer.experiment_name='qwen_e2e_ci_function_rm' \
|
| 30 |
+
trainer.n_gpus_per_node=8 \
|
| 31 |
+
trainer.nnodes=1 \
|
| 32 |
+
trainer.save_freq=-1 \
|
| 33 |
+
trainer.total_training_steps=1 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_function_rm_no_rmpad.sh
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 7 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 8 |
+
data.train_batch_size=1024 \
|
| 9 |
+
data.max_prompt_length=512 \
|
| 10 |
+
data.max_response_length=512 \
|
| 11 |
+
actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
|
| 12 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 13 |
+
actor_rollout_ref.model.use_remove_padding=False \
|
| 14 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 15 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 16 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 17 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 18 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
|
| 19 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 20 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 21 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 22 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
|
| 23 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 24 |
+
critic.optim.lr=1e-5 \
|
| 25 |
+
critic.model.use_remove_padding=False \
|
| 26 |
+
critic.model.path=Qwen/Qwen2.5-0.5B \
|
| 27 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 28 |
+
critic.ppo_micro_batch_size_per_gpu=4 \
|
| 29 |
+
critic.model.fsdp_config.param_offload=False \
|
| 30 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 31 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 32 |
+
trainer.critic_warmup=0 \
|
| 33 |
+
trainer.logger=['console'] \
|
| 34 |
+
+trainer.val_before_train=False \
|
| 35 |
+
trainer.project_name='verl_example_gsm8k' \
|
| 36 |
+
trainer.experiment_name='qwen_e2e_ci_function_rm' \
|
| 37 |
+
trainer.n_gpus_per_node=8 \
|
| 38 |
+
trainer.nnodes=1 \
|
| 39 |
+
trainer.save_freq=-1 \
|
| 40 |
+
trainer.total_training_steps=1 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_function_rm_remax.sh
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 7 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 8 |
+
data.train_batch_size=1024 \
|
| 9 |
+
data.max_prompt_length=512 \
|
| 10 |
+
data.max_response_length=512 \
|
| 11 |
+
actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
|
| 12 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 13 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 14 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 15 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 16 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 17 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 18 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
|
| 19 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 20 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 21 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 22 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
|
| 23 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 24 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 25 |
+
algorithm.adv_estimator=remax \
|
| 26 |
+
trainer.critic_warmup=0 \
|
| 27 |
+
trainer.logger=['console'] \
|
| 28 |
+
trainer.project_name='verl_example_gsm8k' \
|
| 29 |
+
trainer.experiment_name='qwen_e2e_ci_function_rm' \
|
| 30 |
+
trainer.n_gpus_per_node=8 \
|
| 31 |
+
trainer.nnodes=1 \
|
| 32 |
+
trainer.save_freq=-1 \
|
| 33 |
+
trainer.total_training_steps=1 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm.sh
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 7 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 8 |
+
data.train_batch_size=1024 \
|
| 9 |
+
data.max_prompt_length=512 \
|
| 10 |
+
data.max_response_length=512 \
|
| 11 |
+
data.return_raw_chat=True \
|
| 12 |
+
actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
|
| 13 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 14 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 15 |
+
actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \
|
| 16 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 17 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 18 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 19 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 20 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
|
| 21 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 22 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 23 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 24 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
|
| 25 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 26 |
+
critic.optim.lr=1e-5 \
|
| 27 |
+
critic.model.use_remove_padding=True \
|
| 28 |
+
critic.optim.lr_warmup_steps_ratio=0.05 \
|
| 29 |
+
critic.model.path=Qwen/Qwen2.5-0.5B \
|
| 30 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 31 |
+
critic.ppo_micro_batch_size_per_gpu=4 \
|
| 32 |
+
critic.model.fsdp_config.param_offload=False \
|
| 33 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 34 |
+
reward_model.enable=True \
|
| 35 |
+
reward_model.model.path=Qwen/Qwen2.5-0.5B\
|
| 36 |
+
reward_model.model.use_remove_padding=True \
|
| 37 |
+
reward_model.model.fsdp_config.param_offload=True \
|
| 38 |
+
reward_model.micro_batch_size_per_gpu=16 \
|
| 39 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 40 |
+
trainer.critic_warmup=0 \
|
| 41 |
+
trainer.logger=['console'] \
|
| 42 |
+
+trainer.val_before_train=False \
|
| 43 |
+
trainer.project_name='verl_example' \
|
| 44 |
+
trainer.experiment_name='Qwen2.5-0.5B-ci_hybrid_rm' \
|
| 45 |
+
trainer.n_gpus_per_node=8 \
|
| 46 |
+
trainer.nnodes=1 \
|
| 47 |
+
trainer.save_freq=-1 \
|
| 48 |
+
trainer.total_training_steps=1 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm_liger_kernel.sh
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 7 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 8 |
+
data.train_batch_size=1024 \
|
| 9 |
+
data.max_prompt_length=512 \
|
| 10 |
+
data.max_response_length=512 \
|
| 11 |
+
data.return_raw_chat=True \
|
| 12 |
+
actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
|
| 13 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 14 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 15 |
+
+actor_rollout_ref.model.use_liger=True \
|
| 16 |
+
actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \
|
| 17 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 18 |
+
actor_rollout_ref.actor.ppo_micro_batch_size=32 \
|
| 19 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 20 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 21 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size=128 \
|
| 22 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 23 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 24 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 25 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size=128 \
|
| 26 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 27 |
+
critic.optim.lr=1e-5 \
|
| 28 |
+
critic.model.use_remove_padding=True \
|
| 29 |
+
critic.optim.lr_warmup_steps_ratio=0.05 \
|
| 30 |
+
critic.model.path=Qwen/Qwen2.5-0.5B \
|
| 31 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 32 |
+
critic.ppo_micro_batch_size=32 \
|
| 33 |
+
critic.model.fsdp_config.param_offload=False \
|
| 34 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 35 |
+
reward_model.enable=True \
|
| 36 |
+
reward_model.model.path=Qwen/Qwen2.5-0.5B\
|
| 37 |
+
reward_model.model.use_remove_padding=True \
|
| 38 |
+
reward_model.model.fsdp_config.param_offload=True \
|
| 39 |
+
reward_model.micro_batch_size=16 \
|
| 40 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 41 |
+
trainer.critic_warmup=0 \
|
| 42 |
+
trainer.logger=['console'] \
|
| 43 |
+
+trainer.val_before_train=False \
|
| 44 |
+
trainer.project_name='verl_example' \
|
| 45 |
+
trainer.experiment_name='Qwen2.5-0.5B-ci_hybrid_rm' \
|
| 46 |
+
trainer.n_gpus_per_node=8 \
|
| 47 |
+
trainer.nnodes=1 \
|
| 48 |
+
trainer.save_freq=-1 \
|
| 49 |
+
trainer.total_training_steps=1 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm_no_rmpad.sh
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 7 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 8 |
+
data.train_batch_size=1024 \
|
| 9 |
+
data.max_prompt_length=512 \
|
| 10 |
+
data.max_response_length=512 \
|
| 11 |
+
data.return_raw_chat=True \
|
| 12 |
+
actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
|
| 13 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 14 |
+
actor_rollout_ref.model.use_remove_padding=False \
|
| 15 |
+
actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \
|
| 16 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 17 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 18 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 19 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 20 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
|
| 21 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 22 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 23 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 24 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
|
| 25 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 26 |
+
critic.optim.lr=1e-5 \
|
| 27 |
+
critic.model.use_remove_padding=False \
|
| 28 |
+
critic.optim.lr_warmup_steps_ratio=0.05 \
|
| 29 |
+
critic.model.path=Qwen/Qwen2.5-0.5B \
|
| 30 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 31 |
+
critic.ppo_micro_batch_size_per_gpu=4 \
|
| 32 |
+
critic.model.fsdp_config.param_offload=False \
|
| 33 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 34 |
+
reward_model.enable=True \
|
| 35 |
+
reward_model.model.path=Qwen/Qwen2.5-0.5B\
|
| 36 |
+
reward_model.model.use_remove_padding=False \
|
| 37 |
+
reward_model.model.fsdp_config.param_offload=True \
|
| 38 |
+
reward_model.micro_batch_size_per_gpu=16 \
|
| 39 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 40 |
+
trainer.critic_warmup=0 \
|
| 41 |
+
+trainer.val_before_train=False \
|
| 42 |
+
trainer.logger=['console'] \
|
| 43 |
+
trainer.project_name='verl_example' \
|
| 44 |
+
trainer.experiment_name='Qwen2.5-0.5B-ci_hybrid_rm' \
|
| 45 |
+
trainer.n_gpus_per_node=8 \
|
| 46 |
+
trainer.nnodes=1 \
|
| 47 |
+
trainer.save_freq=-1 \
|
| 48 |
+
trainer.total_training_steps=1 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm_seq_balance.sh
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 7 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 8 |
+
data.train_batch_size=1024 \
|
| 9 |
+
data.max_prompt_length=512 \
|
| 10 |
+
data.max_response_length=512 \
|
| 11 |
+
data.return_raw_chat=True \
|
| 12 |
+
actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
|
| 13 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 14 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 15 |
+
actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \
|
| 16 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 17 |
+
actor_rollout_ref.actor.use_dynamic_bsz=True \
|
| 18 |
+
actor_rollout_ref.actor.ppo_max_token_len_per_gpu=12000 \
|
| 19 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 20 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 21 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 22 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 23 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 24 |
+
actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=12000 \
|
| 25 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 26 |
+
actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=12000 \
|
| 27 |
+
critic.optim.lr=1e-5 \
|
| 28 |
+
critic.model.use_remove_padding=True \
|
| 29 |
+
critic.optim.lr_warmup_steps_ratio=0.05 \
|
| 30 |
+
critic.model.path=Qwen/Qwen2.5-0.5B \
|
| 31 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 32 |
+
critic.use_dynamic_bsz=True \
|
| 33 |
+
critic.ppo_max_token_len_per_gpu=98304 \
|
| 34 |
+
critic.model.fsdp_config.param_offload=False \
|
| 35 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 36 |
+
reward_model.enable=True \
|
| 37 |
+
reward_model.model.path=Qwen/Qwen2.5-0.5B\
|
| 38 |
+
reward_model.model.use_remove_padding=True \
|
| 39 |
+
reward_model.model.fsdp_config.param_offload=True \
|
| 40 |
+
reward_model.use_dynamic_bsz=True \
|
| 41 |
+
reward_model.forward_max_token_len_per_gpu=98304 \
|
| 42 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 43 |
+
trainer.critic_warmup=0 \
|
| 44 |
+
trainer.logger=['console'] \
|
| 45 |
+
+trainer.val_before_train=False \
|
| 46 |
+
trainer.project_name='verl_example' \
|
| 47 |
+
trainer.experiment_name='Qwen2.5-0.5B-ci_hybrid_rm_seq_balance' \
|
| 48 |
+
trainer.n_gpus_per_node=8 \
|
| 49 |
+
trainer.nnodes=1 \
|
| 50 |
+
trainer.save_freq=-1 \
|
| 51 |
+
trainer.total_training_steps=1 $@
|
deep_search/DeepResearcher/tests/e2e/run_qwen_gsm8k_model_rm_ulysses.sh
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
set -x
|
| 2 |
+
|
| 3 |
+
export VLLM_ATTENTION_BACKEND=XFORMERS # vllm + qwen2 with flash_attn has some issues
|
| 4 |
+
|
| 5 |
+
python3 -m verl.trainer.main_ppo \
|
| 6 |
+
data.train_files=$HOME/data/gsm8k/train.parquet \
|
| 7 |
+
data.val_files=$HOME/data/gsm8k/test.parquet \
|
| 8 |
+
data.train_batch_size=1024 \
|
| 9 |
+
data.max_prompt_length=512 \
|
| 10 |
+
data.max_response_length=512 \
|
| 11 |
+
data.return_raw_chat=True \
|
| 12 |
+
actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
|
| 13 |
+
actor_rollout_ref.actor.optim.lr=1e-6 \
|
| 14 |
+
actor_rollout_ref.model.use_remove_padding=True \
|
| 15 |
+
actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \
|
| 16 |
+
actor_rollout_ref.actor.ppo_mini_batch_size=256 \
|
| 17 |
+
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
|
| 18 |
+
actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \
|
| 19 |
+
actor_rollout_ref.actor.fsdp_config.param_offload=False \
|
| 20 |
+
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
|
| 21 |
+
actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
|
| 22 |
+
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
|
| 23 |
+
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
|
| 24 |
+
actor_rollout_ref.rollout.name=vllm \
|
| 25 |
+
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
|
| 26 |
+
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
|
| 27 |
+
actor_rollout_ref.ref.fsdp_config.param_offload=True \
|
| 28 |
+
critic.optim.lr=1e-5 \
|
| 29 |
+
critic.ulysses_sequence_parallel_size=2 \
|
| 30 |
+
critic.model.use_remove_padding=True \
|
| 31 |
+
critic.optim.lr_warmup_steps_ratio=0.05 \
|
| 32 |
+
critic.model.path=Qwen/Qwen2.5-0.5B \
|
| 33 |
+
critic.model.enable_gradient_checkpointing=False \
|
| 34 |
+
critic.ppo_micro_batch_size_per_gpu=4 \
|
| 35 |
+
critic.model.fsdp_config.param_offload=False \
|
| 36 |
+
critic.model.fsdp_config.optimizer_offload=False \
|
| 37 |
+
critic.model.fsdp_config.fsdp_size=-1 \
|
| 38 |
+
reward_model.enable=True \
|
| 39 |
+
reward_model.ulysses_sequence_parallel_size=2 \
|
| 40 |
+
reward_model.model.path=Qwen/Qwen2.5-0.5B\
|
| 41 |
+
reward_model.model.use_remove_padding=True \
|
| 42 |
+
reward_model.model.fsdp_config.param_offload=True \
|
| 43 |
+
reward_model.micro_batch_size_per_gpu=16 \
|
| 44 |
+
algorithm.kl_ctrl.kl_coef=0.001 \
|
| 45 |
+
trainer.critic_warmup=0 \
|
| 46 |
+
+trainer.val_before_train=False \
|
| 47 |
+
trainer.logger=['console'] \
|
| 48 |
+
trainer.project_name='verl_example' \
|
| 49 |
+
trainer.experiment_name='Qwen2.5-0.5B-ci_hybrid_rm_sp2' \
|
| 50 |
+
trainer.n_gpus_per_node=8 \
|
| 51 |
+
trainer.nnodes=1 \
|
| 52 |
+
trainer.save_freq=-1 \
|
| 53 |
+
trainer.total_training_steps=1 $@
|